Merge branch 'feature/VNC-151-investigate-tiger-vnc-h-264-encode-decoder2' into 'master'

VNC-151 Add h264

Closes VNC-151

See merge request kasm-technologies/internal/KasmVNC!192
This commit is contained in: master
Matthew McClaskey 2026-02-03 14:38:45 +00:00
commit 7c60b7faba
111 changed files with 3997 additions and 1072 deletions

View File

@ -1,4 +1,4 @@
#!/usr/bin/ruby
#!/usr/bin/env ruby
package_name = ARGV.first

.clang-format (new file, 69 lines added)
View File

@ -0,0 +1,69 @@
# Generated from CLion C/C++ Code Style settings
---
Language: Cpp
BasedOnStyle: LLVM
AccessModifierOffset: -4
AlignArrayOfStructures: Left
AlignAfterOpenBracket: DontAlign
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignOperands: true
AllowShortFunctionsOnASingleLine: Empty
AlignTrailingComments: false
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BraceWrapping:
AfterCaseLabel: true
AfterClass: false
AfterControlStatement: Never
AfterEnum: true
AfterFunction: false
AfterNamespace: false
AfterStruct: false
AfterUnion: true
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
BeforeLambdaBody: false
BeforeWhile: false
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakBeforeBraces: Custom
BreakConstructorInitializers: AfterColon
BreakConstructorInitializersBeforeComma: false
ColumnLimit: 120
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
IncludeCategories:
- Regex: '^<.*'
Priority: 1
- Regex: '^".*'
Priority: 2
- Regex: '.*'
Priority: 3
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentWidth: 4
InsertNewlineAtEOF: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 2
NamespaceIndentation: All
PackConstructorInitializers: Never
SortIncludes: CaseSensitive
SpaceAfterCStyleCast: true
SpaceAfterTemplateKeyword: false
SpaceBeforeRangeBasedForLoopColon: false
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
TabWidth: 4
UseTab: Never
...
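
For reference, a minimal sketch of applying the new style locally; it assumes a recent clang-format is installed (options such as InsertNewlineAtEOF need clang-format 16 or newer) and that the commands are run from the repository root so this .clang-format is picked up:

```bash
# Reformat a file in place using the nearest .clang-format (the one added above).
clang-format -i common/rfb/EncodeManager.cxx

# Check formatting without modifying files; exits non-zero on violations.
clang-format --dry-run --Werror common/rfb/encoders/*.cxx
```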

View File

@ -241,7 +241,6 @@ run_test_arm64:
allow_failure: true
dependencies:
- build_arm64
artifacts:
artifacts:
when: always
paths:

View File

@ -6,7 +6,7 @@
git submodule init
git submodule update --remote --merge
sudo docker build -t kasmvnc:dev -f builder/dockerfile.ubuntu_jammy.dev .
sudo docker run -it --rm -v ./:/src -p 6901:6901 -p 8443:8443 --name kasmvnc_dev kasmvnc:dev
sudo docker run -it --rm -v ./:/src -p 2222:22 -p 6901:6901 -p 8443:8443 --device=/dev/dri/card0 --device=/dev/dri/renderD128 --group-add video --group-add render --name kasmvnc_dev kasmvnc:dev
```
**The above assumes you are UID 1000 on the host, as the container UID is 1000.**
@ -30,7 +30,10 @@ builder/build.sh
Now run Xvnc and Xfce4 from inside the container
```bash
/src/xorg.build/bin/Xvnc -interface 0.0.0.0 -PublicIP 127.0.0.1 -disableBasicAuth -RectThreads 0 -Log *:stdout:100 -httpd /src/kasmweb/dist -sslOnly 0 -SecurityTypes None -websocketPort 6901 -FreeKeyMappings :1 &
mkdir ~/.vnc
openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout ${HOME}/.vnc/self.pem -out ${HOME}/.vnc/self.pem -subj "/C=US/ST=VA/L=None/O=None/OU=DoFu/CN=kasm/emailAddress=none@none.none"
/src/xorg.build/bin/Xvnc -interface 0.0.0.0 -PublicIP 127.0.0.1 -disableBasicAuth -RectThreads 0 -Log *:stdout:100 -httpd /src/kasmweb/dist -sslOnly 1 -SecurityTypes None -websocketPort 6901 -FreeKeyMappings -cert ~/.vnc/self.pem -key ~/.vnc/self.pem -videoCodec h264 :1 &
/usr/bin/xfce4-session --display :1
```
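
Before relying on the -videoCodec h264 path, it can help to confirm GPU access first; a small sketch, assuming the container was started with the /dev/dri devices mapped as in the docker run line above and that vainfo is installed (the dev image below adds it):

```bash
# On the host: the render node must exist and your user should be in the video/render groups.
ls -l /dev/dri/
id -nG | tr ' ' '\n' | grep -Ex 'video|render'

# Inside the container: a VA-API driver should load and list H.264 encode entrypoints.
vainfo | grep -i h264
```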

View File

@ -217,6 +217,8 @@ if(ENABLE_PAM)
endif()
set(HAVE_PAM ${ENABLE_PAM})
option(DEBUG_FFMPEG "Debug ffmpeg" OFF)
# Check for SSE2
check_cxx_compiler_flag(-msse2 COMPILER_SUPPORTS_SSE2)
@ -227,6 +229,9 @@ include_directories(${CMAKE_BINARY_DIR})
include(cmake/StaticBuild.cmake)
find_package(PkgConfig REQUIRED)
pkg_check_modules(FFMPEG REQUIRED libavcodec libavformat libavutil libswscale libva)
add_subdirectory(third_party)
add_subdirectory(common)
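
If the new pkg_check_modules call fails at configure time, a quick sketch (assuming pkg-config is on PATH) to see which required module is missing:

```bash
# Every module listed in pkg_check_modules(FFMPEG REQUIRED ...) must resolve to an installed .pc file.
for m in libavcodec libavformat libavutil libswscale libva; do
  pkg-config --exists "$m" && echo "$m $(pkg-config --modversion "$m")" || echo "$m MISSING"
done
```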

View File

@ -1,7 +1,7 @@
ARG BASE_IMAGE
FROM $BASE_IMAGE
RUN apk add bash
RUN apk add bash ffmpeg-dev
ENV STARTUPDIR=/dockerstartup

View File

@ -6,7 +6,7 @@ ENV XORG_VER 21.1.10
RUN \
echo "**** install build deps ****" && \
apk add \
apk add --no-cache \
alpine-release \
alpine-sdk \
autoconf \
@ -30,6 +30,7 @@ RUN \
libjpeg-turbo-static \
libpciaccess-dev \
libtool \
libva-dev \
libwebp-dev \
libx11-dev \
libxau-dev \
@ -44,6 +45,7 @@ RUN \
libxshmfence-dev \
libxtst-dev \
mesa-dev \
mesa-va-gallium \
mesa-dri-gallium \
meson \
nettle-dev \
@ -68,8 +70,12 @@ RUN \
xorg-server-common \
xorg-server-dev \
xtrans \
ffmpeg-dev
ffmpeg-dev \
libdrm-dev
RUN if [ "$(uname -m)" = "x86_64" ]; then \
apk add --no-cache intel-media-driver; \
fi
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -6,7 +6,7 @@ ENV XORG_VER 21.1.12
RUN \
echo "**** install build deps ****" && \
apk add \
apk add --no-cache \
alpine-release \
alpine-sdk \
autoconf \
@ -43,8 +43,10 @@ RUN \
libxrandr-dev \
libxshmfence-dev \
libxtst-dev \
libva-dev \
mesa-dev \
mesa-dri-gallium \
mesa-va-gallium \
meson \
nettle-dev \
openssl-dev \
@ -68,8 +70,12 @@ RUN \
xorg-server-common \
xorg-server-dev \
xtrans \
ffmpeg-dev
ffmpeg-dev \
libdrm-dev
RUN if [ "$(uname -m)" = "x86_64" ]; then \
apk add --no-cache intel-media-driver; \
fi
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -6,7 +6,7 @@ ENV XORG_VER 21.1.14
RUN \
echo "**** install build deps ****" && \
apk add \
apk add --no-cache \
alpine-release \
alpine-sdk \
autoconf \
@ -43,8 +43,10 @@ RUN \
libxrandr-dev \
libxshmfence-dev \
libxtst-dev \
libva-dev \
mesa-dev \
mesa-dri-gallium \
mesa-va-gallium \
meson \
nettle-dev \
openssl-dev \
@ -68,8 +70,12 @@ RUN \
xorg-server-common \
xorg-server-dev \
xtrans \
ffmpeg-dev
ffmpeg-dev \
libdrm-dev
RUN if [ "$(uname -m)" = "x86_64" ]; then \
apk add --no-cache intel-media-driver; \
fi
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -6,7 +6,7 @@ ENV XORG_VER 21.1.14
RUN \
echo "**** install build deps ****" && \
apk add \
apk add --no-cache \
alpine-release \
alpine-sdk \
autoconf \
@ -45,6 +45,7 @@ RUN \
libxtst-dev \
mesa-dev \
mesa-dri-gallium \
mesa-va-gallium \
meson \
nettle-dev \
openssl-dev \
@ -68,8 +69,12 @@ RUN \
xorg-server-common \
xorg-server-dev \
xtrans \
ffmpeg-dev
ffmpeg-dev \
libva-dev
RUN if [ "$(uname -m)" = "x86_64" ]; then \
apk add --no-cache intel-media-driver; \
fi
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -24,7 +24,12 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tz
RUN apt-get update && apt-get -y build-dep xorg-server libxfont-dev
RUN apt-get update && apt-get -y install ninja-build cmake nasm git libgnutls28-dev vim wget tightvncserver curl
RUN apt-get update && apt-get -y install libpng-dev libtiff-dev libgif-dev libavcodec-dev libssl-dev libxrandr-dev \
libxcursor-dev libavformat-dev libswscale-dev
libxcursor-dev libavformat-dev libswscale-dev libva-dev
# x86_64 specific operations
RUN if [ "$(arch)" = "x86_64" ]; then \
apt-get update && apt-get install -y intel-media-va-driver-non-free; \
fi
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -14,7 +14,7 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tz
RUN apt-get update && apt-get -y build-dep xorg-server libxfont-dev
RUN apt-get update && apt-get -y install ninja-build nasm git libgnutls28-dev vim wget tightvncserver curl
RUN apt-get update && apt-get -y install libpng-dev libtiff-dev libgif-dev libavcodec-dev libssl-dev libxrandr-dev \
libxcursor-dev libavformat-dev libswscale-dev
libxcursor-dev libavformat-dev libswscale-dev libva-dev
RUN CMAKE_URL="https://cmake.org/files/v3.22/cmake-3.22.0" && \
ARCH=$(arch) && \

View File

@ -24,7 +24,12 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tz
RUN apt-get update && apt-get -y build-dep xorg-server libxfont-dev
RUN apt-get update && apt-get -y install ninja-build cmake nasm git libgnutls28-dev vim wget tightvncserver curl
RUN apt-get update && apt-get -y install libpng-dev libtiff-dev libgif-dev libavcodec-dev libssl-dev libxrandr-dev \
libxcursor-dev libavformat-dev libswscale-dev
libxcursor-dev libavformat-dev libswscale-dev libva-dev
# x86_64 specific operations
RUN if [ "$(arch)" = "x86_64" ]; then \
apt-get update && apt-get install -y intel-media-va-driver-non-free; \
fi
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -75,7 +75,8 @@ RUN \
xorg-x11-xtrans-devel \
xsltproc \
libavformat-free-devel \
libswscale-free-devel
libswscale-free-devel \
libva-devel
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -75,7 +75,8 @@ RUN \
xorg-x11-xtrans-devel \
xsltproc \
libavformat-free-devel \
libswscale-free-devel
libswscale-free-devel \
libva-devel
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -17,7 +17,7 @@ RUN apt-get update && apt-get -y build-dep xorg-server libxfont-dev
RUN apt-get update && apt-get -y install gcc g++ curl
RUN apt-get update && apt-get -y install ninja-build cmake nasm git libgnutls28-dev vim wget tightvncserver
RUN apt-get update && apt-get -y install libpng-dev libtiff-dev libgif-dev libavcodec-dev libssl-dev libxrandr-dev \
libxcursor-dev libavformat-dev libswscale-dev
libxcursor-dev libavformat-dev libswscale-dev libva-dev
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -46,7 +46,8 @@ RUN zypper install -ny \
xorg-x11-devel \
xorg-x11-server-sdk \
xorg-x11-util-devel \
zlib-devel
zlib-devel \
libva-devel
RUN useradd -u 1000 docker && \
groupadd -g 1000 docker && \

View File

@ -48,7 +48,8 @@ RUN zypper install -ny \
xorg-x11-util-devel \
xorg-x11-server-sdk \
xorg-x11-util-devel \
zlib-devel
zlib-devel \
libva-devel
RUN useradd -u 1000 docker && \
usermod -a -G docker docker

View File

@ -52,7 +52,8 @@ RUN dnf install -y \
libXrandr-devel \
libXtst-devel \
libXcursor-devel \
libSM-devel
libSM-devel \
libva-devel
ENV SCRIPTS_DIR=/tmp/scripts
ENV PKG_CONFIG_PATH=/usr/local/lib64/pkgconfig:${PKG_CONFIG_PATH:-/opt/rh/gcc-toolset-14/root/usr/lib64/pkgconfig}

View File

@ -52,7 +52,9 @@ RUN dnf install -y \
libXrandr-devel \
libXtst-devel \
libXcursor-devel \
libSM-devel
libSM-devel \
libva-devel
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -14,7 +14,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends tzdata
RUN apt-get update && apt-get -y build-dep xorg-server libxfont-dev
RUN apt-get update && apt-get -y install ninja-build nasm git vim wget curl
RUN apt-get update && apt-get -y install libpng-dev libtiff-dev libgif-dev libavcodec-dev libssl-dev libxrandr-dev \
libxcursor-dev libavformat-dev libswscale-dev
libxcursor-dev libavformat-dev libswscale-dev libva-dev gcc-10 g++-10
RUN sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 \
--slave /usr/bin/g++ g++ /usr/bin/g++-10 \
--slave /usr/bin/gcov gcov /usr/bin/gcov-10
# x86_64 specific operations
RUN if [ "$(arch)" = "x86_64" ]; then \
apt-get update && apt-get install -y intel-media-va-driver-non-free; \
fi
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -14,7 +14,12 @@ RUN apt-get update && apt-get install -y --no-install-recommends tzdata
RUN apt-get update && apt-get -y build-dep xorg-server libxfont-dev
RUN apt-get update && apt-get -y install ninja-build cmake nasm git libgnutls28-dev vim wget tightvncserver curl
RUN apt-get update && apt-get -y install libpng-dev libtiff-dev libgif-dev libavcodec-dev libssl-dev libxrandr-dev \
libxcursor-dev libavformat-dev libswscale-dev
libxcursor-dev libavformat-dev libswscale-dev libva-dev
# x86_64 specific operations
RUN if [ "$(arch)" = "x86_64" ]; then \
apt-get update && apt-get install -y intel-media-va-driver-non-free; \
fi
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -7,6 +7,7 @@ ENV XORG_PATCH 21
ENV DEBIAN_FRONTEND noninteractive
EXPOSE 6901
EXPOSE 22
USER root
@ -54,18 +55,32 @@ RUN sed -i 's$# deb-src$deb-src$' /etc/apt/sources.list && \
x11proto-dev \
libgbm-dev \
htop \
inotify-tools && \
echo "kasm-user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
libva-dev \
libpam0g-dev \
libbsd-dev \
openssh-server \
inotify-tools
RUN echo "kasm-user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers && \
mkdir -p /var/run/sshd && \
sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash -
RUN apt install -y nodejs nginx
# Packages required for VA-API hardware acceleration (Mesa + Intel)
RUN apt install -y vainfo mesa-va-drivers mesa-vulkan-drivers \
intel-media-va-driver-non-free
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR
RUN $SCRIPTS_DIR/build-deps.sh
RUN ssh-keygen -A
RUN echo "kasm-user:password" | chpasswd
USER 1000
WORKDIR /src
ENTRYPOINT /bin/bash
ENTRYPOINT ["/bin/bash", "-c", "sudo /usr/sbin/sshd && exec /bin/bash"]
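
With sshd now baked into the dev image and container port 22 published as host port 2222 in the docker run line earlier, a minimal sketch of connecting (assumes the default kasm-user:password credentials set above are left unchanged; change them for anything beyond local development):

```bash
# Host port 2222 is mapped to the container's sshd (port 22).
ssh -p 2222 kasm-user@localhost
# Password: "password" (set via chpasswd in this Dockerfile).
```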

View File

@ -14,7 +14,12 @@ RUN apt-get update && apt-get install -y --no-install-recommends tzdata
RUN apt-get update && apt-get -y build-dep xorg-server libxfont-dev
RUN apt-get update && apt-get -y install ninja-build cmake nasm git libgnutls28-dev vim wget curl
RUN apt-get update && apt-get -y install libpng-dev libtiff-dev libgif-dev libavcodec-dev libssl-dev libxrandr-dev \
libxcursor-dev libavformat-dev libswscale-dev
libxcursor-dev libavformat-dev libswscale-dev libva-dev
# x86_64 specific operations
RUN if [ "$(arch)" = "x86_64" ]; then \
apt-get update && apt-get install -y intel-media-va-driver-non-free; \
fi
ENV SCRIPTS_DIR=/tmp/scripts
COPY builder/scripts $SCRIPTS_DIR

View File

@ -1,9 +1,15 @@
#!/bin/bash
set -e
set -euo pipefail
source_dir=$(dirname "$0")
echo ">> Building libjpeg-turbo..."
"${source_dir}"/build-libjpeg-turbo
echo ">> Building WebP..."
"${source_dir}"/build-webp
echo ">> Building Intel TBB..."
"${source_dir}"/build-tbb
"${source_dir}"/build-cpuid
echo ">> Building cpuid..."
"${source_dir}"/build-cpuid
echo ">> Building fmt..."
"${source_dir}"/build-fmt

builder/scripts/build-fmt (new executable file, 26 lines added)
View File

@ -0,0 +1,26 @@
#!/usr/bin/env bash
set -euo pipefail
build_and_install() {
cmake -S . -B build -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -GNinja
ninja -C build install
}
prepare_source() {
DIR=fmt
cd /tmp
[ -d ./${DIR} ] && rm -rf ./${DIR}
mkdir ${DIR}
LIBCPUID_RELEASE=$(curl -sL "https://api.github.com/repos/fmtlib/fmt/releases/latest" \
| grep '"tag_name":' | sed -E 's/.*"tag_name": "([^"]+)".*/\1/')
curl -Ls "https://github.com/fmtlib/fmt/archive/${LIBCPUID_RELEASE}.tar.gz" | \
tar xzvf - -C ${DIR}/ --strip-components=1
cd ${DIR}
}
prepare_source
build_and_install
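
The static fmt built here is later located through pkg-config (pkg_check_modules(FMT REQUIRED fmt) in common/rfb/CMakeLists.txt); a small sketch to confirm the install is visible, assuming the script's default /usr/local prefix:

```bash
# fmt installs a fmt.pc file; pkg-config must be able to find it.
pkg-config --exists fmt && pkg-config --modversion fmt
# On lib64-based distros the .pc file can land outside the default search path:
# export PKG_CONFIG_PATH=/usr/local/lib64/pkgconfig:$PKG_CONFIG_PATH
```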

View File

@ -21,8 +21,18 @@ prepare_source() {
build_and_install() {
export MAKEFLAGS=-j$(nproc)
./configure --enable-static --disable-shared --enable-threading --enable-sse2 --enable-neon
make
CONFIG_FLAGS=( --enable-static --disable-shared --enable-threading )
ARCH=$(arch)
if [ "$ARCH" = "x86_64" ]; then
CONFIG_FLAGS+=( --enable-sse2 )
elif [ "$ARCH" = "aarch64" ]; then
CONFIG_FLAGS+=( --enable-neon )
else
echo "Unsupported architecture: $ARCH" && exit 1;
fi
./configure "${CONFIG_FLAGS[@]}"
make install
}

View File

@ -7,6 +7,14 @@ current_dir="$(pwd)"
images_list=()
cd kasmweb
npm install
npm run build
cd "$current_dir"
cp -r kasmweb/dist "$current_dir"/builder/www
for file in "$script_dir"/dockerfile.*.build; do
if [[ "$file" == *deb* ]] || [[ "$file" == *apk* ]] || [[ "$file" == *rpm* ]] || [[ "$file" == *www* ]]; then
continue
@ -23,6 +31,10 @@ for file in "$script_dir"/dockerfile.*.build; do
echo "Building docker image: $image_name using file: $file"
rm -rf .cmake CMakeFiles build.ninja cmake_install.cmake cmake_uninstall.cmake CMakeCache.txt config.h
rm -rf unix
git checkout HEAD -- unix/
docker build -f "$file" -t "$image_name" "$current_dir"
exit_code=$?
if [ $exit_code -ne 0 ]; then
@ -30,11 +42,10 @@ for file in "$script_dir"/dockerfile.*.build; do
break
fi
rm -rf .cmake CMakeFiles build.ninja cmake_install.cmake cmake_uninstall.cmake CMakeCache.txt config.h
echo "Running container from image '$image_name'"
# Run the container and capture the exit code
docker run -it -v "$current_dir":/src -v "$current_dir/builder/build":/build "$image_name"
docker run -it -v "$current_dir":/src -v "$current_dir/builder/build":/build --device=/dev/dri:/dev/dri "$image_name"
exit_code=$?
echo "Container for image '$image_name' "

View File

@ -1,36 +1,36 @@
include_directories(${CMAKE_SOURCE_DIR}/common ${CMAKE_SOURCE_DIR}/unix/kasmvncpasswd)
include_directories(${CMAKE_SOURCE_DIR}/common ${CMAKE_SOURCE_DIR}/unix/kasmvncpasswd ${FFMPEG_INCLUDE_DIRS})
set(NETWORK_SOURCES
GetAPIMessager.cxx
Blacklist.cxx
iceip.cxx
Socket.cxx
TcpSocket.cxx
Udp.cxx
cJSON.c
jsonescape.c
websocket.c
websockify.c
GetAPIMessager.cxx
Blacklist.cxx
iceip.cxx
Socket.cxx
TcpSocket.cxx
Udp.cxx
cJSON.c
jsonescape.c
websocket.c
websockify.c
webudp/CRC32.cpp
webudp/WuArena.cpp
webudp/Wu.cpp
webudp/WuCrypto.cpp
webudp/WuHostEpoll.cpp
webudp/WuNetwork.cpp
webudp/WuPool.cpp
webudp/WuQueue.cpp
webudp/WuRng.cpp
webudp/WuSctp.cpp
webudp/WuSdp.cpp
webudp/WuString.cpp
webudp/WuStun.cpp
webudp/CRC32.cpp
webudp/WuArena.cpp
webudp/Wu.cpp
webudp/WuCrypto.cpp
webudp/WuHostEpoll.cpp
webudp/WuNetwork.cpp
webudp/WuPool.cpp
webudp/WuQueue.cpp
webudp/WuRng.cpp
webudp/WuSctp.cpp
webudp/WuSdp.cpp
webudp/WuString.cpp
webudp/WuStun.cpp
${CMAKE_SOURCE_DIR}/unix/kasmvncpasswd/kasmpasswd.c)
${CMAKE_SOURCE_DIR}/unix/kasmvncpasswd/kasmpasswd.c)
if(NOT WIN32)
set(NETWORK_SOURCES ${NETWORK_SOURCES} UnixSocket.cxx)
endif()
if (NOT WIN32)
set(NETWORK_SOURCES ${NETWORK_SOURCES} UnixSocket.cxx)
endif ()
add_library(network STATIC ${NETWORK_SOURCES})

View File

@ -19,8 +19,9 @@
#ifndef __RDR_TYPES_H__
#define __RDR_TYPES_H__
namespace rdr {
#include <cstdint>
namespace rdr {
typedef unsigned char U8;
typedef unsigned short U16;
typedef unsigned int U32;

View File

@ -1,5 +1,5 @@
set(RFB_SOURCES
benchmark.cxx
benchmark/benchmark.cxx
Blacklist.cxx
Congestion.cxx
CConnection.cxx
@ -69,6 +69,15 @@ set(RFB_SOURCES
util.cxx
xxhash.c
ffmpeg.cxx
encoders/SoftwareEncoder.cxx
benchmark/FfmpegFrameFeeder.cpp
encoders/ScreenEncoderManager.cxx
encoders/FFMPEGVAAPIEncoder.cxx
encoders/ScreenEncoderManager.cxx
encoders/VideoEncoderFactory.cxx
encoders/EncoderProbe.cpp
encoders/EncoderConfiguration.cpp
encoders/utils.cpp
)
if (UNIX)
@ -131,8 +140,9 @@ endif ()
find_package(PkgConfig REQUIRED)
pkg_check_modules(FFMPEG REQUIRED libavcodec libavformat libavutil libswscale)
pkg_check_modules(CPUID REQUIRED libcpuid)
pkg_check_modules(FMT REQUIRED fmt)
pkg_check_modules(VIDEO_ACCELERATION REQUIRED libva libva-drm libdrm)
find_package(TBB)
if (TBB_FOUND)
@ -155,7 +165,14 @@ target_include_directories(rfb PRIVATE
${CPUID_INCLUDE_DIRS}
)
target_link_libraries(rfb PUBLIC ${RFB_LIBRARIES} tinyxml2_objs ${TBB_LIBRARIES} ${CPUID_LIBRARIES})
set(RFB_LIBRARIES ${RFB_LIBRARIES} tinyxml2_objs ${TBB_LIBRARIES} ${CPUID_LIBRARIES} ${FMT_LIBRARIES} ${VIDEO_ACCELERATION_LIBRARIES})
if (DEBUG_FFMPEG)
set(RFB_LIBRARIES ${RFB_LIBRARIES} ${FFMPEG_LIBRARIES})
endif ()
target_include_directories(rfb PUBLIC ${VIDEO_ACCELERATION_INCLUDE_DIRS})
target_link_libraries(rfb PUBLIC ${RFB_LIBRARIES})
if (UNIX)
libtool_create_control_file(rfb)

View File

@ -17,19 +17,20 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include <stdio.h>
#include <string.h>
#include <rdr/InStream.h>
#include <rdr/OutStream.h>
#include <rfb/ConnParams.h>
#include <rfb/Exception.h>
#include <rfb/LogWriter.h>
#include <rfb/SMsgHandler.h>
#include <rfb/ServerCore.h>
#include <rfb/clipboardTypes.h>
#include <rfb/encoders/EncoderConfiguration.h>
#include <rfb/encodings.h>
#include <rfb/ledStates.h>
#include <rfb/LogWriter.h>
#include <rfb/clipboardTypes.h>
#include <rfb/ConnParams.h>
#include <rfb/ServerCore.h>
#include <rfb/SMsgHandler.h>
#include <rfb/util.h>
#include <stdio.h>
#include <string.h>
using namespace rfb;
@ -157,7 +158,8 @@ void ConnParams::setEncodings(int nEncodings, const rdr::S32* encodings)
encodings_.clear();
encodings_.insert(encodingRaw);
bool canChangeSettings = !shandler || shandler->canChangeKasmSettings();
const bool canChangeSettings = !shandler || shandler->canChangeKasmSettings();
const bool can_apply = !rfb::Server::ignoreClientSettingsKasm && canChangeSettings;
for (int i = nEncodings-1; i >= 0; i--) {
switch (encodings[i]) {
@ -252,15 +254,12 @@ void ConnParams::setEncodings(int nEncodings, const rdr::S32* encodings)
subsampling = subsample16X;
break;
case pseudoEncodingPreferBandwidth:
if (!rfb::Server::ignoreClientSettingsKasm && canChangeSettings) {
if (can_apply)
Server::preferBandwidth.setParam(true);
clientparlog("preferBandwidth", true);
} else {
clientparlog("preferBandwidth", false);
}
clientparlog("preferBandwidth", can_apply);
break;
case pseudoEncodingMaxVideoResolution:
if (!rfb::Server::ignoreClientSettingsKasm && canChangeSettings)
if (can_apply)
kasmPassed[KASM_MAX_VIDEO_RESOLUTION] = true;
break;
}
@ -283,116 +282,92 @@ void ConnParams::setEncodings(int nEncodings, const rdr::S32* encodings)
clientparlog("fineQualityLevel", fineQualityLevel, true);
}
if (!rfb::Server::ignoreClientSettingsKasm && canChangeSettings) {
if (encodings[i] >= pseudoEncodingJpegVideoQualityLevel0 &&
encodings[i] <= pseudoEncodingJpegVideoQualityLevel9) {
Server::jpegVideoQuality.setParam(encodings[i] - pseudoEncodingJpegVideoQualityLevel0);
clientparlog("jpegVideoQuality", encodings[i] - pseudoEncodingJpegVideoQualityLevel0, true);
}
if (encodings[i] >= pseudoEncodingJpegVideoQualityLevel0 && encodings[i] <= pseudoEncodingJpegVideoQualityLevel9) {
if (can_apply)
Server::jpegVideoQuality.setParam(encodings[i] - pseudoEncodingJpegVideoQualityLevel0);
clientparlog("jpegVideoQuality", encodings[i] - pseudoEncodingJpegVideoQualityLevel0, can_apply);
}
if (encodings[i] >= pseudoEncodingWebpVideoQualityLevel0 &&
encodings[i] <= pseudoEncodingWebpVideoQualityLevel9) {
Server::webpVideoQuality.setParam(encodings[i] - pseudoEncodingWebpVideoQualityLevel0);
clientparlog("webpVideoQuality", encodings[i] - pseudoEncodingWebpVideoQualityLevel0, true);
}
if (encodings[i] >= pseudoEncodingWebpVideoQualityLevel0 && encodings[i] <= pseudoEncodingWebpVideoQualityLevel9) {
if (can_apply)
Server::webpVideoQuality.setParam(encodings[i] - pseudoEncodingWebpVideoQualityLevel0);
clientparlog("webpVideoQuality", encodings[i] - pseudoEncodingWebpVideoQualityLevel0, can_apply);
}
if (encodings[i] >= pseudoEncodingTreatLosslessLevel0 &&
encodings[i] <= pseudoEncodingTreatLosslessLevel10) {
Server::treatLossless.setParam(encodings[i] - pseudoEncodingTreatLosslessLevel0);
clientparlog("treatLossless", encodings[i] - pseudoEncodingTreatLosslessLevel0, true);
}
if (encodings[i] >= pseudoEncodingTreatLosslessLevel0 && encodings[i] <= pseudoEncodingTreatLosslessLevel10) {
if (can_apply)
Server::treatLossless.setParam(encodings[i] - pseudoEncodingTreatLosslessLevel0);
clientparlog("treatLossless", encodings[i] - pseudoEncodingTreatLosslessLevel0, can_apply);
}
if (encodings[i] >= pseudoEncodingDynamicQualityMinLevel0 &&
encodings[i] <= pseudoEncodingDynamicQualityMinLevel9) {
Server::dynamicQualityMin.setParam(encodings[i] - pseudoEncodingDynamicQualityMinLevel0);
clientparlog("dynamicQualityMin", encodings[i] - pseudoEncodingDynamicQualityMinLevel0, true);
}
if (encodings[i] >= pseudoEncodingDynamicQualityMinLevel0 && encodings[i] <= pseudoEncodingDynamicQualityMinLevel9) {
if (can_apply)
Server::dynamicQualityMin.setParam(encodings[i] - pseudoEncodingDynamicQualityMinLevel0);
clientparlog("dynamicQualityMin", encodings[i] - pseudoEncodingDynamicQualityMinLevel0, can_apply);
}
if (encodings[i] >= pseudoEncodingDynamicQualityMaxLevel0 &&
encodings[i] <= pseudoEncodingDynamicQualityMaxLevel9) {
Server::dynamicQualityMax.setParam(encodings[i] - pseudoEncodingDynamicQualityMaxLevel0);
clientparlog("dynamicQualityMax", encodings[i] - pseudoEncodingDynamicQualityMaxLevel0, true);
}
if (encodings[i] >= pseudoEncodingDynamicQualityMaxLevel0 && encodings[i] <= pseudoEncodingDynamicQualityMaxLevel9) {
if (can_apply)
Server::dynamicQualityMax.setParam(encodings[i] - pseudoEncodingDynamicQualityMaxLevel0);
clientparlog("dynamicQualityMax", encodings[i] - pseudoEncodingDynamicQualityMaxLevel0, can_apply);
}
if (encodings[i] >= pseudoEncodingVideoAreaLevel1 &&
encodings[i] <= pseudoEncodingVideoAreaLevel100) {
Server::videoArea.setParam(encodings[i] - pseudoEncodingVideoAreaLevel1 + 1);
clientparlog("videoArea", encodings[i] - pseudoEncodingVideoAreaLevel1 + 1, true);
}
if (encodings[i] >= pseudoEncodingVideoAreaLevel1 && encodings[i] <= pseudoEncodingVideoAreaLevel100) {
if (can_apply)
Server::videoArea.setParam(encodings[i] - pseudoEncodingVideoAreaLevel1 + 1);
clientparlog("videoArea", encodings[i] - pseudoEncodingVideoAreaLevel1 + 1, can_apply);
}
if (encodings[i] >= pseudoEncodingVideoTimeLevel0 &&
encodings[i] <= pseudoEncodingVideoTimeLevel100) {
Server::videoTime.setParam(encodings[i] - pseudoEncodingVideoTimeLevel0);
clientparlog("videoTime", encodings[i] - pseudoEncodingVideoTimeLevel0, true);
}
if (encodings[i] >= pseudoEncodingVideoTimeLevel0 && encodings[i] <= pseudoEncodingVideoTimeLevel100) {
if (can_apply)
Server::videoTime.setParam(encodings[i] - pseudoEncodingVideoTimeLevel0);
clientparlog("videoTime", encodings[i] - pseudoEncodingVideoTimeLevel0, can_apply);
}
if (encodings[i] >= pseudoEncodingVideoOutTimeLevel1 &&
encodings[i] <= pseudoEncodingVideoOutTimeLevel100) {
Server::videoOutTime.setParam(encodings[i] - pseudoEncodingVideoOutTimeLevel1 + 1);
clientparlog("videoOutTime", encodings[i] - pseudoEncodingVideoOutTimeLevel1 + 1, true);
}
if (encodings[i] >= pseudoEncodingVideoOutTimeLevel1 && encodings[i] <= pseudoEncodingVideoOutTimeLevel100) {
if (can_apply)
Server::videoOutTime.setParam(encodings[i] - pseudoEncodingVideoOutTimeLevel1 + 1);
clientparlog("videoOutTime", encodings[i] - pseudoEncodingVideoOutTimeLevel1 + 1, can_apply);
}
if (encodings[i] >= pseudoEncodingFrameRateLevel10 &&
encodings[i] <= pseudoEncodingFrameRateLevel60) {
Server::frameRate.setParam(encodings[i] - pseudoEncodingFrameRateLevel10 + 10);
clientparlog("frameRate", encodings[i] - pseudoEncodingFrameRateLevel10 + 10, true);
}
if (encodings[i] >= pseudoEncodingFrameRateLevel10 && encodings[i] <= pseudoEncodingFrameRateLevel60) {
if (can_apply)
Server::frameRate.setParam(encodings[i] - pseudoEncodingFrameRateLevel10 + 10);
clientparlog("frameRate", encodings[i] - pseudoEncodingFrameRateLevel10 + 10, can_apply);
}
if (encodings[i] >= pseudoEncodingVideoScalingLevel0 &&
encodings[i] <= pseudoEncodingVideoScalingLevel9) {
Server::videoScaling.setParam(encodings[i] - pseudoEncodingVideoScalingLevel0);
clientparlog("videoScaling", encodings[i] - pseudoEncodingVideoScalingLevel0, true);
}
} else {
if (encodings[i] >= pseudoEncodingJpegVideoQualityLevel0 &&
encodings[i] <= pseudoEncodingJpegVideoQualityLevel9) {
clientparlog("jpegVideoQuality", encodings[i] - pseudoEncodingJpegVideoQualityLevel0, false);
}
if (encodings[i] >= pseudoEncodingVideoScalingLevel0 && encodings[i] <= pseudoEncodingVideoScalingLevel9) {
if (can_apply)
Server::videoScaling.setParam(encodings[i] - pseudoEncodingVideoScalingLevel0);
clientparlog("videoScaling", encodings[i] - pseudoEncodingVideoScalingLevel0, can_apply);
}
if (encodings[i] >= pseudoEncodingWebpVideoQualityLevel0 &&
encodings[i] <= pseudoEncodingWebpVideoQualityLevel9) {
clientparlog("webpVideoQuality", encodings[i] - pseudoEncodingWebpVideoQualityLevel0, false);
}
// encs.push(encodings.pseudoEncodingStreamingMode + this.streamMode);
if (encodings[i] >= pseudoEncodingTreatLosslessLevel0 &&
encodings[i] <= pseudoEncodingTreatLosslessLevel10) {
clientparlog("treatLossless", encodings[i] - pseudoEncodingTreatLosslessLevel0, false);
}
// if (encodings[i] >= pseudoEncodingHardwareProfile0 && encodings[i] <= pseudoEncodingHardwareProfile4) {
// if (appliable)
// Server::hardwareProfile.setParam(encodings[i] - pseudoEncodingHardwareProfile0);
// clientparlog("hardwareProfile", encodings[i] - pseudoEncodingHardwareProfile0, appliable);
// }
if (encodings[i] >= pseudoEncodingDynamicQualityMinLevel0 &&
encodings[i] <= pseudoEncodingDynamicQualityMinLevel9) {
clientparlog("dynamicQualityMin", encodings[i] - pseudoEncodingDynamicQualityMinLevel0, false);
}
if (encodings[i] >= pseudoEncodingGOP1 && encodings[i] <= pseudoEncodingGOP60) {
if (can_apply)
Server::groupOfPicture.setParam(encodings[i] - pseudoEncodingGOP1);
clientparlog("groupOfPicture", encodings[i] - pseudoEncodingGOP1, can_apply);
}
if (encodings[i] >= pseudoEncodingDynamicQualityMaxLevel0 &&
encodings[i] <= pseudoEncodingDynamicQualityMaxLevel9) {
clientparlog("dynamicQualityMax", encodings[i] - pseudoEncodingDynamicQualityMaxLevel0, false);
}
if (encodings[i] >= pseudoEncodingStreamingVideoQualityLevel0 && encodings[i] <= pseudoEncodingStreamingVideoQualityLevel63) {
const auto &config = EncoderConfiguration::get_configuration(encoder);
const auto value = config.max_quality - encodings[i] + pseudoEncodingStreamingVideoQualityLevel0;
if (can_apply)
Server::videoQualityCRFCQP.setParam(value);
clientparlog("videoQualityCRFCQP", value, can_apply);
}
if (encodings[i] >= pseudoEncodingVideoAreaLevel1 &&
encodings[i] <= pseudoEncodingVideoAreaLevel100) {
clientparlog("videoArea", encodings[i] - pseudoEncodingVideoAreaLevel1 + 1, false);
}
if (encodings[i] >= pseudoEncodingVideoTimeLevel0 &&
encodings[i] <= pseudoEncodingVideoTimeLevel100) {
clientparlog("videoTime", encodings[i] - pseudoEncodingVideoTimeLevel0, false);
}
if (encodings[i] >= pseudoEncodingVideoOutTimeLevel1 &&
encodings[i] <= pseudoEncodingVideoOutTimeLevel100) {
clientparlog("videoOutTime", encodings[i] - pseudoEncodingVideoOutTimeLevel1 + 1, false);
}
if (encodings[i] >= pseudoEncodingFrameRateLevel10 &&
encodings[i] <= pseudoEncodingFrameRateLevel60) {
clientparlog("frameRate", encodings[i] - pseudoEncodingFrameRateLevel10 + 10, false);
}
if (encodings[i] >= pseudoEncodingVideoScalingLevel0 &&
encodings[i] <= pseudoEncodingVideoScalingLevel9) {
clientparlog("videoScaling", encodings[i] - pseudoEncodingVideoScalingLevel0, false);
}
if (encodings[i] >=pseudoEncodingStreamingModeAV1QSV && encodings[i] <= pseudoEncodingStreamingModeJpegWebp) {
if (can_apply)
encoder = KasmVideoEncoders::from_encoding(encodings[i]);
clientparlog("Encoder", encodings[i], can_apply);
}
if (encodings[i] > 0)

View File

@ -1,16 +1,16 @@
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved.
* Copyright 2014 Pierre Ossman for Cendio AB
*
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
@ -29,18 +29,19 @@
#include <rfb/Cursor.h>
#include <rfb/PixelFormat.h>
#include <rfb/ScreenSet.h>
#include <rfb/encoders/KasmVideoEncoders.h>
namespace rdr { class InStream; }
namespace rfb {
const int subsampleUndefined = -1;
const int subsampleNone = 0;
const int subsampleGray = 1;
const int subsample2X = 2;
const int subsample4X = 3;
const int subsample8X = 4;
const int subsample16X = 5;
constexpr int subsampleUndefined = -1;
constexpr int subsampleNone = 0;
constexpr int subsampleGray = 1;
constexpr int subsample2X = 2;
constexpr int subsample4X = 3;
constexpr int subsample8X = 4;
constexpr int subsample16X = 5;
class SMsgHandler;
@ -91,7 +92,7 @@ namespace rfb {
void setEncodings(int nEncodings, const rdr::S32* encodings);
unsigned int ledState() { return ledState_; }
unsigned int ledState() const { return ledState_; }
void setLEDState(unsigned int state);
rdr::U32 clipboardFlags() const { return clipFlags; }
@ -143,6 +144,8 @@ namespace rfb {
};
bool kasmPassed[KASM_NUM_SETTINGS];
KasmVideoEncoders::Encoder encoder{KasmVideoEncoders::Encoder::unavailable};
KasmVideoEncoders::Encoders available_encoders;
private:

View File

@ -34,17 +34,21 @@
#include <rfb/Exception.h>
#include <rfb/Watermark.h>
#include <rfb/RawEncoder.h>
#include <rfb/RREEncoder.h>
#include <execution>
#include <rfb/HextileEncoder.h>
#include <rfb/ZRLEEncoder.h>
#include <rfb/RREEncoder.h>
#include <rfb/RawEncoder.h>
#include <rfb/TightEncoder.h>
#include <rfb/TightJPEGEncoder.h>
#include <rfb/TightWEBPEncoder.h>
#include <rfb/TightQOIEncoder.h>
#include <execution>
#include <rfb/TightWEBPEncoder.h>
#include <rfb/ZRLEEncoder.h>
#include <tbb/parallel_for.h>
#include "encoders/EncoderProbe.h"
#include "encoders/ScreenEncoderManager.h"
#include "encoders/VideoEncoder.h"
using namespace rfb;
static LogWriter vlog("EncodeManager");
@ -54,14 +58,14 @@ static LogWriter vlog("EncodeManager");
// Split each rectangle into smaller ones no larger than this area,
// and no wider than this width.
static const int SubRectMaxArea = 65536;
static const int SubRectMaxWidth = 2048;
static constexpr int SubRectMaxArea = 65536;
static constexpr int SubRectMaxWidth = 2048;
// The size in pixels of either side of each block tested when looking
// for solid blocks.
static const int SolidSearchBlock = 16;
static constexpr int SolidSearchBlock = 16;
// Don't bother with blocks smaller than this
static const int SolidBlockMinArea = 2048;
static constexpr int SolidBlockMinArea = 2048;
namespace rfb {
@ -74,6 +78,7 @@ enum EncoderClass {
encoderTightWEBP,
encoderTightQOI,
encoderZRLE,
encoderKasmVideo,
encoderClassMax,
};
@ -119,6 +124,8 @@ static const char *encoderClassName(EncoderClass klass)
return "Tight (QOI)";
case encoderZRLE:
return "ZRLE";
case encoderKasmVideo:
return "KasmVideo";
case encoderClassMax:
break;
}
@ -160,56 +167,69 @@ static void updateMaxVideoRes(uint16_t *x, uint16_t *y) {
}
}
EncodeManager::EncodeManager(SConnection* conn_, EncCache *encCache_) : conn(conn_),
dynamicQualityMin(-1), dynamicQualityOff(-1),
areaCur(0), videoDetected(false), videoTimer(this),
watermarkStats(0),
maxEncodingTime(0), framesSinceEncPrint(0),
encCache(encCache_)
EncodeManager::EncodeManager(SConnection *conn_, EncCache *encCache_, const FFmpeg& ffmpeg_, const video_encoders::EncoderProbe &encoder_probe_) :
conn(conn_), dynamicQualityMin(-1), dynamicQualityOff(-1), areaCur(0), videoDetected(false), videoTimer(this),
watermarkStats(0), maxEncodingTime(0), framesSinceEncPrint(0), ffmpeg(ffmpeg_), ffmpeg_available(ffmpeg.is_available()),
encoder_probe(encoder_probe_), encCache(encCache_)
{
StatsVector::iterator iter;
encoders.resize(encoderClassMax, nullptr);
activeEncoders.resize(encoderTypeMax, encoderRaw);
encoders.resize(encoderClassMax, NULL);
activeEncoders.resize(encoderTypeMax, encoderRaw);
encoders[encoderRaw] = new RawEncoder(conn);
encoders[encoderRRE] = new RREEncoder(conn);
encoders[encoderHextile] = new HextileEncoder(conn);
encoders[encoderTight] = new TightEncoder(conn);
encoders[encoderTightJPEG] = new TightJPEGEncoder(conn);
encoders[encoderTightWEBP] = new TightWEBPEncoder(conn);
encoders[encoderTightQOI] = new TightQOIEncoder(conn);
encoders[encoderZRLE] = new ZRLEEncoder(conn);
encoders[encoderRaw] = new RawEncoder(conn);
encoders[encoderRRE] = new RREEncoder(conn);
encoders[encoderHextile] = new HextileEncoder(conn);
encoders[encoderTight] = new TightEncoder(conn);
encoders[encoderTightJPEG] = new TightJPEGEncoder(conn);
encoders[encoderTightWEBP] = new TightWEBPEncoder(conn);
encoders[encoderTightQOI] = new TightQOIEncoder(conn);
encoders[encoderZRLE] = new ZRLEEncoder(conn);
if (ffmpeg_available) {
encoders[encoderKasmVideo] = new ScreenEncoderManager(ffmpeg,
encoder_probe.get_best_encoder(),
encoder_probe.get_available_encoders(),
conn,
encoder_probe.get_drm_device_path(),
{conn_->cp.width,
conn_->cp.height,
static_cast<uint8_t>(Server::frameRate),
static_cast<uint8_t>(Server::groupOfPicture),
static_cast<uint8_t>(Server::videoQualityCRFCQP)});
}
webpBenchResult = ((TightWEBPEncoder *) encoders[encoderTightWEBP])->benchmark();
vlog.info("WEBP benchmark result: %u ms", webpBenchResult);
video_mode_available = ffmpeg_available && Server::videoCodec[0];
unsigned videoTime = rfb::Server::videoTime;
if (videoTime < 1) videoTime = 1;
//areaPercentages = new unsigned char[videoTime * rfb::Server::frameRate]();
// maximum possible values, as they may change later at runtime
areaPercentages = new unsigned char[2000 * 60]();
webpBenchResult = ((TightWEBPEncoder *) encoders[encoderTightWEBP])->benchmark();
vlog.info("WEBP benchmark result: %u ms", webpBenchResult);
if (!rfb::Server::videoTime)
videoDetected = true;
unsigned videoTime = rfb::Server::videoTime;
if (videoTime < 1)
videoTime = 1;
// areaPercentages = new unsigned char[videoTime * rfb::Server::frameRate]();
// maximum possible values, as they may change later at runtime
areaPercentages = new unsigned char[2000 * 60]();
updateMaxVideoRes(&maxVideoX, &maxVideoY);
if (!rfb::Server::videoTime)
videoDetected = true;
updates = 0;
memset(&copyStats, 0, sizeof(copyStats));
stats.resize(encoderClassMax);
for (iter = stats.begin();iter != stats.end();++iter) {
StatsVector::value_type::iterator iter2;
iter->resize(encoderTypeMax);
for (iter2 = iter->begin();iter2 != iter->end();++iter2)
memset(&*iter2, 0, sizeof(EncoderStats));
}
updateMaxVideoRes(&maxVideoX, &maxVideoY);
if (Server::dynamicQualityMax && Server::dynamicQualityMax <= 9 &&
Server::dynamicQualityMax > Server::dynamicQualityMin) {
dynamicQualityMin = Server::dynamicQualityMin;
dynamicQualityOff = Server::dynamicQualityMax - Server::dynamicQualityMin;
}
updates = 0;
memset(&copyStats, 0, sizeof(copyStats));
stats.resize(encoderClassMax);
for (auto iter = stats.begin(); iter != stats.end(); ++iter)
{
StatsVector::value_type::iterator iter2;
iter->resize(encoderTypeMax);
for (iter2 = iter->begin(); iter2 != iter->end(); ++iter2)
memset(&*iter2, 0, sizeof(EncoderStats));
}
if (Server::dynamicQualityMax && Server::dynamicQualityMax <= 9 &&
Server::dynamicQualityMax > Server::dynamicQualityMin) {
dynamicQualityMin = Server::dynamicQualityMin;
dynamicQualityOff = Server::dynamicQualityMax - Server::dynamicQualityMin;
}
const auto num_cores = cpu_info::cores_count;
arena.initialize(num_cores);
@ -217,17 +237,15 @@ EncodeManager::EncodeManager(SConnection* conn_, EncCache *encCache_) : conn(con
EncodeManager::~EncodeManager()
{
std::vector<Encoder*>::iterator iter;
logStats();
logStats();
delete[] areaPercentages;
delete [] areaPercentages;
for (auto iter = encoders.begin(); iter != encoders.end(); ++iter)
delete *iter;
for (iter = encoders.begin();iter != encoders.end();iter++)
delete *iter;
for (std::list<QualityInfo*>::iterator it = qualityList.begin(); it != qualityList.end(); it++)
delete *it;
for (auto it = qualityList.begin(); it != qualityList.end(); ++it)
delete *it;
}
void EncodeManager::logStats()
@ -335,38 +353,38 @@ void EncodeManager::pruneLosslessRefresh(const Region& limits)
lossyRegion.assign_intersect(limits);
}
void EncodeManager::writeUpdate(const UpdateInfo& ui, const PixelBuffer* pb,
void EncodeManager::writeUpdate(const UpdateInfo& ui, const ScreenSet &layout, const PixelBuffer* pb,
const RenderedCursor* renderedCursor,
size_t maxUpdateSize)
{
curMaxUpdateSize = maxUpdateSize;
doUpdate(true, ui.changed, ui.copied, ui.copy_delta, ui.copypassed, pb, renderedCursor);
doUpdate(true, ui.changed, ui.copied, ui.copy_delta, ui.copypassed, layout, pb, renderedCursor);
}
void EncodeManager::writeLosslessRefresh(const Region& req, const PixelBuffer* pb,
void EncodeManager::writeLosslessRefresh(const Region& req, const ScreenSet &layout, const PixelBuffer* pb,
const RenderedCursor* renderedCursor,
size_t maxUpdateSize)
{
if (videoDetected)
if (videoDetected || video_mode_available)
return;
doUpdate(false, getLosslessRefresh(req, maxUpdateSize),
Region(), Point(), std::vector<CopyPassRect>(), pb, renderedCursor);
Region(), Point(), std::vector<CopyPassRect>(), layout, pb, renderedCursor);
}
void EncodeManager::doUpdate(bool allowLossy, const Region& changed_,
const Region& copied, const Point& copyDelta,
const std::vector<CopyPassRect>& copypassed,
const ScreenSet &layout,
const PixelBuffer* pb,
const RenderedCursor* renderedCursor)
{
const RenderedCursor* renderedCursor) {
int nRects;
Region changed, cursorRegion;
struct timeval start;
updates++;
if (conn->cp.supportsUdp)
((network::UdpStream *) conn->getOutStream(conn->cp.supportsUdp))->setFrameNumber(updates);
((network::UdpStream *) conn->getOutStream(conn->cp.supportsUdp))->setFrameNumber(updates);
// The video resolution may have changed, check it
@ -376,12 +394,12 @@ void EncodeManager::doUpdate(bool allowLossy, const Region& changed_,
// The dynamic quality params may have changed
if (Server::dynamicQualityMax && Server::dynamicQualityMax <= 9 &&
Server::dynamicQualityMax > Server::dynamicQualityMin) {
dynamicQualityMin = Server::dynamicQualityMin;
dynamicQualityOff = Server::dynamicQualityMax - Server::dynamicQualityMin;
} else if (Server::dynamicQualityMin >= 0) {
dynamicQualityMin = Server::dynamicQualityMin;
dynamicQualityOff = 0;
}
dynamicQualityMin = Server::dynamicQualityMin;
dynamicQualityOff = Server::dynamicQualityMax - Server::dynamicQualityMin;
} else if (Server::dynamicQualityMin >= 0) {
dynamicQualityMin = Server::dynamicQualityMin;
dynamicQualityOff = 0;
}
prepareEncoders(allowLossy);
@ -400,20 +418,20 @@ void EncodeManager::doUpdate(bool allowLossy, const Region& changed_,
* magical pixel buffer, so split it out from the changed region.
*/
if (renderedCursor != NULL) {
cursorRegion = changed.intersect(renderedCursor->getEffectiveRect());
changed.assign_subtract(renderedCursor->getEffectiveRect());
cursorRegion = changed.intersect(renderedCursor->getEffectiveRect());
changed.assign_subtract(renderedCursor->getEffectiveRect());
}
if (conn->cp.supportsLastRect)
nRects = 0xFFFF;
nRects = 0xFFFF;
else {
nRects = copied.numRects();
nRects += copypassed.size();
nRects += computeNumRects(changed);
nRects += computeNumRects(cursorRegion);
nRects = copied.numRects();
nRects += copypassed.size();
nRects += computeNumRects(changed);
nRects += computeNumRects(cursorRegion);
if (watermarkData)
nRects++;
if (watermarkData)
nRects++;
}
conn->writer()->writeFramebufferUpdateStart(nRects);
@ -421,17 +439,23 @@ void EncodeManager::doUpdate(bool allowLossy, const Region& changed_,
writeCopyRects(copied, copyDelta);
writeCopyPassRects(copypassed);
/*
* We start by searching for solid rects, which are then removed
* from the changed region.
*/
if (conn->cp.supportsLastRect && !conn->cp.supportsQOI)
writeSolidRects(&changed, pb);
bool video_mode = video_mode_available && conn->cp.encoder != KasmVideoEncoders::Encoder::unavailable;
if (video_mode) {
video_mode = updateVideo(changed, layout, pb);
}
writeRects(changed, pb,
&start, true);
if (!videoDetected) // In case detection happened between the calls
writeRects(cursorRegion, renderedCursor);
if (!video_mode) {
/*
* We start by searching for solid rects, which are then removed
* from the changed region.
*/
if (conn->cp.supportsLastRect && !conn->cp.supportsQOI)
writeSolidRects(&changed, pb);
writeRects(changed, pb, &start, true);
if (!videoDetected) // In case detection happened between the calls
writeRects(cursorRegion, renderedCursor);
}
if (watermarkData && conn->sendWatermark()) {
beforeLength = conn->getOutStream(conn->cp.supportsUdp)->length();
@ -452,23 +476,61 @@ void EncodeManager::doUpdate(bool allowLossy, const Region& changed_,
updateQualities();
printf("TOTAL FRAME TOOK: %d\n", msSince(&start));
conn->writer()->writeFramebufferUpdateEnd();
}
bool EncodeManager::updateVideo(const Region &changed, const ScreenSet &layout, const PixelBuffer *pb) {
auto *screen_encoder_manager = dynamic_cast<ScreenEncoderManager<> *>(encoders[encoderKasmVideo]);
if (!screen_encoder_manager)
return false;
if (screen_encoder_manager->get_encoder() != conn->cp.encoder) {
delete encoders[encoderKasmVideo];
screen_encoder_manager = new ScreenEncoderManager(ffmpeg,
conn->cp.encoder,
encoder_probe.get_available_encoders(),
conn,
encoder_probe.get_drm_device_path(),
{0,
0,
static_cast<uint8_t>(Server::frameRate),
static_cast<uint8_t>(Server::groupOfPicture),
static_cast<uint8_t>(Server::videoQualityCRFCQP)});
if (!screen_encoder_manager)
return false;
encoders[encoderKasmVideo] = screen_encoder_manager;
}
if (!screen_encoder_manager->sync_layout(layout, changed))
return false;
static const Palette palette;
screen_encoder_manager->writeRect(pb, palette);
std::vector<Rect> rects;
changed.get_rects(&rects);
updateVideoStats(rects, pb);
return true;
}
void EncodeManager::prepareEncoders(bool allowLossy)
{
enum EncoderClass solid, bitmap, bitmapRLE;
enum EncoderClass indexed, indexedRLE, fullColour;
EncoderClass bitmap, bitmapRLE;
EncoderClass indexedRLE, fullColour;
rdr::S32 preferred;
std::vector<int>::iterator iter;
solid = bitmap = bitmapRLE = encoderRaw;
indexed = indexedRLE = fullColour = encoderRaw;
auto solid = bitmap = bitmapRLE = encoderRaw;
auto indexed = indexedRLE = fullColour = encoderRaw;
// Try to respect the client's wishes
preferred = conn->getPreferredEncoding();
const auto preferred = conn->getPreferredEncoding();
const bool isHighBppSupported = conn->cp.pf().bpp >= 16;
const bool isHighBppLossyAllowed = isHighBppSupported && allowLossy;
switch (preferred) {
case encodingRRE:
// Horrible for anything high frequency and/or lots of colours
@ -479,14 +541,11 @@ void EncodeManager::prepareEncoders(bool allowLossy)
bitmapRLE = indexedRLE = fullColour = encoderHextile;
break;
case encodingTight:
if (encoders[encoderTightQOI]->isSupported() &&
(conn->cp.pf().bpp >= 16))
if (encoders[encoderTightQOI]->isSupported() && isHighBppSupported)
fullColour = encoderTightQOI;
else if (encoders[encoderTightWEBP]->isSupported() &&
(conn->cp.pf().bpp >= 16) && allowLossy)
else if (encoders[encoderTightWEBP]->isSupported() && isHighBppLossyAllowed)
fullColour = encoderTightWEBP;
else if (encoders[encoderTightJPEG]->isSupported() &&
(conn->cp.pf().bpp >= 16) && allowLossy)
else if (encoders[encoderTightJPEG]->isSupported() && isHighBppLossyAllowed)
fullColour = encoderTightJPEG;
else
fullColour = encoderTight;
@ -503,14 +562,11 @@ void EncodeManager::prepareEncoders(bool allowLossy)
// Any encoders still unassigned?
if (fullColour == encoderRaw) {
if (encoders[encoderTightQOI]->isSupported() &&
(conn->cp.pf().bpp >= 16))
if (encoders[encoderTightQOI]->isSupported() && isHighBppSupported)
fullColour = encoderTightQOI;
else if (encoders[encoderTightWEBP]->isSupported() &&
(conn->cp.pf().bpp >= 16) && allowLossy)
else if (encoders[encoderTightWEBP]->isSupported() && isHighBppLossyAllowed)
fullColour = encoderTightWEBP;
else if (encoders[encoderTightJPEG]->isSupported() &&
(conn->cp.pf().bpp >= 16) && allowLossy)
else if (encoders[encoderTightJPEG]->isSupported() && isHighBppLossyAllowed)
fullColour = encoderTightJPEG;
else if (encoders[encoderZRLE]->isSupported())
fullColour = encoderZRLE;
@ -562,10 +618,8 @@ void EncodeManager::prepareEncoders(bool allowLossy)
activeEncoders[encoderIndexedRLE] = indexedRLE;
activeEncoders[encoderFullColour] = fullColour;
for (iter = activeEncoders.begin(); iter != activeEncoders.end(); ++iter) {
Encoder *encoder;
encoder = encoders[*iter];
for (const auto activeEncoder : activeEncoders) {
auto *encoder = encoders[activeEncoder];
encoder->setCompressLevel(conn->cp.compressLevel);
encoder->setQualityLevel(conn->cp.qualityLevel);
@ -636,7 +690,7 @@ int EncodeManager::computeNumRects(const Region& changed)
// No split necessary?
if ((((w*h) < SubRectMaxArea) && (w < SubRectMaxWidth)) ||
(videoDetected && !encoders[encoderTightWEBP]->isSupported())) {
(videoDetected && !video_mode_available && !encoders[encoderTightWEBP]->isSupported())) {
numRects += 1;
continue;
}
@ -655,56 +709,60 @@ int EncodeManager::computeNumRects(const Region& changed)
return numRects;
}
Encoder *EncodeManager::startRect(const Rect& rect, int type, const bool trackQuality,
const uint8_t isWebp)
{
Encoder *encoder;
int klass, equiv;
Encoder *EncodeManager::startRect(const Rect &rect, int type, const bool trackQuality, const startRectOverride overrider) {
activeType = type;
activeType = type;
klass = activeEncoders[activeType];
if (isWebp)
klass = encoderTightWEBP;
int klass;
beforeLength = conn->getOutStream(conn->cp.supportsUdp)->length();
switch (overrider) {
case STARTRECT_OVERRIDE_WEBP:
klass = encoderTightWEBP;
break;
case STARTRECT_OVERRIDE_KASMVIDEO:
klass = encoderKasmVideo;
break;
default:
klass = activeEncoders[activeType];
}
stats[klass][activeType].rects++;
stats[klass][activeType].pixels += rect.area();
equiv = 12 + rect.area() * (conn->cp.pf().bpp/8);
stats[klass][activeType].equivalent += equiv;
beforeLength = static_cast<int>(conn->getOutStream(conn->cp.supportsUdp)->length());
encoder = encoders[klass];
conn->writer()->startRect(rect, encoder->encoding);
stats[klass][activeType].rects++;
stats[klass][activeType].pixels += rect.area();
const int equiv = 12 + rect.area() * (conn->cp.pf().bpp >> 3);
stats[klass][activeType].equivalent += equiv;
if (type == encoderFullColour && dynamicQualityMin > -1 && trackQuality) {
trackRectQuality(rect);
Encoder *encoder = encoders[klass];
conn->writer()->startRect(rect, encoder->encoding);
// Set the dynamic quality here. Unset fine quality, as it would overrule us
encoder->setQualityLevel(scaledQuality(rect));
encoder->setFineQualityLevel(-1, subsampleUndefined);
}
if (type == encoderFullColour && dynamicQualityMin > -1 && trackQuality) {
trackRectQuality(rect);
if (encoder->flags & EncoderLossy && (!encoder->treatLossless() || videoDetected))
lossyRegion.assign_union(Region(rect));
else
lossyRegion.assign_subtract(Region(rect));
// Set the dynamic quality here. Unset fine quality, as it would overrule us
encoder->setQualityLevel(scaledQuality(rect));
encoder->setFineQualityLevel(-1, subsampleUndefined);
}
return encoder;
if (encoder->flags & EncoderLossy && (!encoder->treatLossless() || videoDetected))
lossyRegion.assign_union(Region(rect));
else
lossyRegion.assign_subtract(Region(rect));
return encoder;
}
void EncodeManager::endRect(const uint8_t isWebp)
void EncodeManager::endRect(const startRectOverride overrider)
{
int klass;
int length;
conn->writer()->endRect();
const auto length = conn->getOutStream(conn->cp.supportsUdp)->length() - beforeLength;
auto klass = activeEncoders[activeType];
conn->writer()->endRect();
if (overrider == STARTRECT_OVERRIDE_WEBP)
klass = encoderTightWEBP;
else if (overrider == STARTRECT_OVERRIDE_KASMVIDEO)
klass = encoderKasmVideo;
length = conn->getOutStream(conn->cp.supportsUdp)->length() - beforeLength;
klass = activeEncoders[activeType];
if (isWebp)
klass = encoderTightWEBP;
stats[klass][activeType].bytes += length;
stats[klass][activeType].bytes += length;
}
void EncodeManager::writeCopyPassRects(const std::vector<CopyPassRect>& copypassed)
@ -903,17 +961,14 @@ bool EncodeManager::handleTimeout(Timer* t)
void EncodeManager::updateVideoStats(const std::vector<Rect> &rects, const PixelBuffer* pb)
{
std::vector<Rect>::const_iterator rect;
uint32_t i;
if (!rfb::Server::videoTime) {
videoDetected = true;
return;
}
if (!rfb::Server::videoTime) {
videoDetected = true;
return;
}
unsigned area = 0;
const unsigned samples = rfb::Server::videoTime * rfb::Server::frameRate;
for (rect = rects.begin(); rect != rects.end(); ++rect) {
for (auto rect = rects.begin(); rect != rects.end(); ++rect) {
area += rect->area();
}
area *= 100;
@ -924,7 +979,7 @@ void EncodeManager::updateVideoStats(const std::vector<Rect> &rects, const Pixel
areaCur %= samples;
area = 0;
for (i = 0; i < samples; i++)
for (uint32_t i = 0; i < samples; i++)
area += areaPercentages[i];
area /= samples;
@ -1127,7 +1182,7 @@ void EncodeManager::writeRects(const Region& changed, const PixelBuffer* pb,
updateVideoStats(rects, pb);
}
if (videoDetected) {
if (videoDetected && !video_mode_available) {
rects.clear();
rects.push_back(pb->getRect());
}
@ -1143,7 +1198,7 @@ void EncodeManager::writeRects(const Region& changed, const PixelBuffer* pb,
// No split necessary?
if ((((w*h) < SubRectMaxArea) && (w < SubRectMaxWidth)) ||
(videoDetected && !encoders[encoderTightWEBP]->isSupported())) {
(videoDetected && !video_mode_available && !encoders[encoderTightWEBP]->isSupported())) {
subrects.push_back(rect);
trackRectQuality(rect);
continue;
@ -1188,7 +1243,7 @@ void EncodeManager::writeRects(const Region& changed, const PixelBuffer* pb,
gettimeofday(&scalestart, NULL);
const PixelBuffer *scaledpb = NULL;
if (videoDetected &&
if (videoDetected && !video_mode_available &&
(maxVideoX < pb->getRect().width() || maxVideoY < pb->getRect().height())) {
const float xdiff = maxVideoX / (float) pb->getRect().width();
const float ydiff = maxVideoY / (float) pb->getRect().height();
@ -1263,7 +1318,7 @@ void EncodeManager::writeRects(const Region& changed, const PixelBuffer* pb,
if (maxEncodingTime < encodingTime)
maxEncodingTime = encodingTime;
if (framesSinceEncPrint >= rfb::Server::frameRate) {
if (framesSinceEncPrint >= (unsigned) rfb::Server::frameRate) {
vlog.info("Max encoding time during the last %u frames: %u ms (limit %u, near limit %.0f)",
framesSinceEncPrint, maxEncodingTime, 1000/rfb::Server::frameRate,
1000/rfb::Server::frameRate * 0.8f);
@ -1278,7 +1333,7 @@ void EncodeManager::writeRects(const Region& changed, const PixelBuffer* pb,
for (uint32_t i = 0; i < subrects_size; ++i) {
if (encCache->enabled && !compresseds[i].empty() && !fromCache[i] &&
!encoders[encoderTightQOI]->isSupported()) {
!encoders[encoderTightQOI]->isSupported()) {
void *tmp = malloc(compresseds[i].size());
memcpy(tmp, &compresseds[i][0], compresseds[i].size());
encCache->add(isWebp[i] ? encoderTightWEBP : encoderTightJPEG,
@ -1302,12 +1357,11 @@ uint8_t EncodeManager::getEncoderType(const Rect& rect, const PixelBuffer *pb,
struct RectInfo info;
unsigned int maxColours = 256;
PixelBuffer *ppb;
Encoder *encoder;
bool useRLE;
EncoderType type;
encoder = encoders[activeEncoders[encoderIndexedRLE]];
const Encoder *encoder = encoders[activeEncoders[encoderIndexedRLE]];
if (maxColours > encoder->maxPaletteSize)
maxColours = encoder->maxPaletteSize;
encoder = encoders[activeEncoders[encoderIndexed]];
@ -1357,7 +1411,9 @@ uint8_t EncodeManager::getEncoderType(const Rect& rect, const PixelBuffer *pb,
struct timeval start;
gettimeofday(&start, NULL);
if (encCache->enabled &&
if (encCache && video_mode_available) {
// nop, send this as a skip rect
} else if (encCache->enabled &&
(data = encCache->get(activeEncoders[encoderFullColour],
rect.tl.x, rect.tl.y, rect.width(), rect.height(),
len))) {
@ -1428,7 +1484,7 @@ void EncodeManager::writeSubRect(const Rect& rect, const PixelBuffer *pb,
PixelBuffer *ppb;
Encoder *encoder;
encoder = startRect(rect, type, compressed.size() == 0, isWebp);
encoder = startRect(rect, type, compressed.size() == 0, isWebp ? STARTRECT_OVERRIDE_WEBP : STARTRECT_NO_OVERRIDE);
if (compressed.size()) {
if (isWebp) {
@ -1444,7 +1500,7 @@ void EncodeManager::writeSubRect(const Rect& rect, const PixelBuffer *pb,
jpegstats.area += rect.area();
jpegstats.rects++;
}
} else {
} else {
if (encoder->flags & EncoderUseNativePF) {
ppb = preparePixelBuffer(rect, pb, false);
} else {
@ -1455,7 +1511,7 @@ void EncodeManager::writeSubRect(const Rect& rect, const PixelBuffer *pb,
delete ppb;
}
endRect(isWebp);
endRect(isWebp ? STARTRECT_OVERRIDE_WEBP : STARTRECT_NO_OVERRIDE);
}
bool EncodeManager::checkSolidTile(const Rect& r, const rdr::U8* colourValue,

View File

@ -30,10 +30,19 @@
#include <rfb/Timer.h>
#include <rfb/UpdateTracker.h>
#include <stdint.h>
#include <atomic>
#include <tbb/task_arena.h>
#include <sys/time.h>
#include <tbb/task_arena.h>
#include "ScreenSet.h"
#include "ffmpeg.h"
#include <rfb/encoders/EncoderProbe.h>
enum startRectOverride {
STARTRECT_NO_OVERRIDE,
STARTRECT_OVERRIDE_WEBP,
STARTRECT_OVERRIDE_KASMVIDEO,
};
namespace rfb {
class SConnection;
@ -50,7 +59,7 @@ namespace rfb {
class EncodeManager: public Timer::Callback {
public:
EncodeManager(SConnection* conn, EncCache *encCache);
EncodeManager(SConnection* conn, EncCache *encCache, const FFmpeg& ffmpeg, const video_encoders::EncoderProbe &encoder_probe_);
~EncodeManager() override;
void logStats();
@ -61,11 +70,11 @@ namespace rfb {
bool needsLosslessRefresh(const Region& req);
void pruneLosslessRefresh(const Region& limits);
void writeUpdate(const UpdateInfo& ui, const PixelBuffer* pb,
void writeUpdate(const UpdateInfo& ui, const ScreenSet &layout, const PixelBuffer* pb,
const RenderedCursor* renderedCursor,
size_t maxUpdateSize = 2000);
void writeLosslessRefresh(const Region& req, const PixelBuffer* pb,
void writeLosslessRefresh(const Region& req, const ScreenSet &layout, const PixelBuffer* pb,
const RenderedCursor* renderedCursor,
size_t maxUpdateSize);
@ -94,31 +103,35 @@ namespace rfb {
void doUpdate(bool allowLossy, const Region& changed,
const Region& copied, const Point& copy_delta,
const std::vector<CopyPassRect> &copypassed,
const ScreenSet &layout,
const PixelBuffer* pb,
const RenderedCursor* renderedCursor);
bool updateVideo(const Region& changed, const ScreenSet &layout, const PixelBuffer* pb);
void prepareEncoders(bool allowLossy);
Region getLosslessRefresh(const Region& req, size_t maxUpdateSize);
int computeNumRects(const Region& changed);
Encoder *startRect(const Rect& rect, int type, const bool trackQuality = true,
const uint8_t isWebp = 0);
void endRect(const uint8_t isWebp = 0);
Encoder *startRect(const Rect& rect, int type, bool trackQuality = true,
enum startRectOverride overrider = STARTRECT_NO_OVERRIDE);
void endRect(enum startRectOverride overrider = STARTRECT_NO_OVERRIDE);
void writeCopyRects(const Region& copied, const Point& delta);
void writeCopyPassRects(const std::vector<CopyPassRect>& copypassed);
void writeSolidRects(Region *changed, const PixelBuffer* pb);
void findSolidRect(const Rect& rect, Region *changed, const PixelBuffer* pb);
void writeRects(const Region& changed, const PixelBuffer* pb,
const struct timeval *start = NULL,
const bool mainScreen = false);
const struct timeval *start = nullptr,
bool mainScreen = false);
void checkWebpFallback(const struct timeval *start);
void updateVideoStats(const std::vector<Rect> &rects, const PixelBuffer* pb);
void writeSubRect(const Rect& rect, const PixelBuffer *pb, const uint8_t type,
void writeSubRect(const Rect& rect, const PixelBuffer *pb, uint8_t type,
const Palette& pal, const std::vector<uint8_t> &compressed,
const uint8_t isWebp);
uint8_t isWebp);
uint8_t getEncoderType(const Rect& rect, const PixelBuffer *pb, Palette *pal,
std::vector<uint8_t> &compressed, uint8_t *isWebp,
@ -144,8 +157,8 @@ namespace rfb {
void updateQualities();
void trackRectQuality(const Rect& rect);
unsigned getQuality(const Rect& rect) const;
unsigned scaledQuality(const Rect& rect) const;
[[nodiscard]] unsigned getQuality(const Rect& rect) const;
[[nodiscard]] unsigned scaledQuality(const Rect& rect) const;
protected:
// Preprocessor generated, optimised methods
@ -207,6 +220,11 @@ namespace rfb {
unsigned maxEncodingTime, framesSinceEncPrint;
unsigned scalingTime;
const FFmpeg &ffmpeg;
bool ffmpeg_available;
bool video_mode_available{false};
const video_encoders::EncoderProbe &encoder_probe;
EncCache *encCache;
class OffsetPixelBuffer : public FullFramePixelBuffer {

View File

@ -23,16 +23,11 @@
using namespace rfb;
Encoder::Encoder(SConnection *conn_, int encoding_,
enum EncoderFlags flags_, unsigned int maxPaletteSize_) :
encoding(encoding_), flags(flags_),
maxPaletteSize(maxPaletteSize_), conn(conn_)
{
}
Encoder::Encoder(SConnection *conn_, int encoding_, EncoderFlags flags_, unsigned int maxPaletteSize_) :
encoding(encoding_), flags(flags_), maxPaletteSize(maxPaletteSize_), conn(conn_), id(UndefinedId) {}
Encoder::~Encoder()
{
}
Encoder::Encoder(Id id_, SConnection *conn_, int encoding_, EncoderFlags flags_, unsigned int maxPaletteSize_) :
encoding(encoding_), flags(flags_), maxPaletteSize(maxPaletteSize_), conn(conn_), id(id_) {}
void Encoder::writeSolidRect(int width, int height,
const PixelFormat& pf, const rdr::U8* colour)

View File

@ -21,7 +21,7 @@
#define __RFB_ENCODER_H__
#include <rdr/types.h>
#include <rfb/Rect.h>
#include <rfb/SConnection.h>
namespace rfb {
class SConnection;
@ -41,14 +41,16 @@ namespace rfb {
class Encoder {
public:
Encoder(SConnection* conn, int encoding,
enum EncoderFlags flags, unsigned int maxPaletteSize);
virtual ~Encoder();
using Id = uint32_t;
static constexpr auto UndefinedId = std::numeric_limits<Id>::max();
Encoder(SConnection* conn, int encoding, EncoderFlags flags, unsigned int maxPaletteSize);
Encoder(Id id, SConnection* conn, int encoding, EncoderFlags flags, unsigned int maxPaletteSize);
virtual ~Encoder() = default;
// isSupported() should return a boolean indicating if this encoder
// is okay to use with the current connection. This usually involves
// checking the list of encodings in the connection parameters.
virtual bool isSupported()=0;
virtual bool isSupported() const = 0;
virtual void setCompressLevel(int level) {};
virtual void setQualityLevel(int level) {};
@ -82,6 +84,8 @@ namespace rfb {
const PixelFormat& pf,
const rdr::U8* colour)=0;
[[nodiscard]] Id getId() const { return id; }
protected:
// Helper method for redirecting a single colour palette to the
// short cut method.
@ -89,13 +93,14 @@ namespace rfb {
public:
const int encoding;
const enum EncoderFlags flags;
const EncoderFlags flags;
// Maximum size of the palette per rect
const unsigned int maxPaletteSize;
protected:
SConnection* conn;
SConnection* conn;
Id id;
};
}

View File

@ -49,11 +49,7 @@ HextileEncoder::HextileEncoder(SConnection* conn) :
{
}
HextileEncoder::~HextileEncoder()
{
}
bool HextileEncoder::isSupported()
bool HextileEncoder::isSupported() const
{
return conn->cp.supportsEncoding(encodingHextile);
}

View File

@ -23,15 +23,13 @@
namespace rfb {
class HextileEncoder : public Encoder {
public:
HextileEncoder(SConnection* conn);
virtual ~HextileEncoder();
virtual bool isSupported();
virtual void writeRect(const PixelBuffer* pb, const Palette& palette);
virtual void writeSolidRect(int width, int height,
const PixelFormat& pf,
const rdr::U8* colour);
};
class HextileEncoder : public Encoder {
public:
HextileEncoder(SConnection* conn);
~HextileEncoder() override = default;
bool isSupported() const override;
void writeRect(const PixelBuffer* pb, const Palette& palette) override;
void writeSolidRect(int width, int height, const PixelFormat &pf, const rdr::U8 *colour) override;
};
}
#endif

View File

@ -41,11 +41,7 @@ RREEncoder::RREEncoder(SConnection* conn) :
{
}
RREEncoder::~RREEncoder()
{
}
bool RREEncoder::isSupported()
bool RREEncoder::isSupported() const
{
return conn->cp.supportsEncoding(encodingRRE);
}

View File

@ -28,12 +28,12 @@ namespace rfb {
class RREEncoder : public Encoder {
public:
RREEncoder(SConnection* conn);
virtual ~RREEncoder();
virtual bool isSupported();
virtual void writeRect(const PixelBuffer* pb, const Palette& palette);
virtual void writeSolidRect(int width, int height,
~RREEncoder() override = default;
bool isSupported() const override;
void writeRect(const PixelBuffer* pb, const Palette& palette) override;
void writeSolidRect(int width, int height,
const PixelFormat& pf,
const rdr::U8* colour);
const rdr::U8* colour) override;
private:
rdr::MemOutStream mos;
ManagedPixelBuffer bufferCopy;

View File

@ -29,11 +29,7 @@ RawEncoder::RawEncoder(SConnection* conn) :
{
}
RawEncoder::~RawEncoder()
{
}
bool RawEncoder::isSupported()
bool RawEncoder::isSupported() const
{
// Implicitly required;
return true;

View File

@ -26,12 +26,12 @@ namespace rfb {
class RawEncoder : public Encoder {
public:
RawEncoder(SConnection* conn);
virtual ~RawEncoder();
virtual bool isSupported();
virtual void writeRect(const PixelBuffer* pb, const Palette& palette);
virtual void writeSolidRect(int width, int height,
~RawEncoder() override = default;
bool isSupported() const override;
void writeRect(const PixelBuffer* pb, const Palette& palette) override;
void writeSolidRect(int width, int height,
const PixelFormat& pf,
const rdr::U8* colour);
const rdr::U8* colour) override;
};
}
#endif

View File

@ -148,7 +148,7 @@ void SConnection::processVersionMsg()
// cope with legacy 3.3 client only if "no authentication" or "vnc
// authentication" is supported.
for (i=secTypes.begin(); i!=secTypes.end(); i++) {
for (i=secTypes.begin(); i!=secTypes.end(); ++i) {
if (*i == secTypeNone || *i == secTypeVncAuth) break;
}
if (i == secTypes.end()) {
@ -170,7 +170,7 @@ void SConnection::processVersionMsg()
throwConnFailedException("No supported security types");
os->writeU8(secTypes.size());
for (i=secTypes.begin(); i!=secTypes.end(); i++)
for (i=secTypes.begin(); i!=secTypes.end(); ++i)
os->writeU8(*i);
os->flush();
state_ = RFBSTATE_SECURITY_TYPE;
@ -192,7 +192,7 @@ void SConnection::processSecurityType(int secType)
std::list<rdr::U8>::iterator i;
secTypes = security.GetEnabledSecTypes();
for (i=secTypes.begin(); i!=secTypes.end(); i++)
for (i=secTypes.begin(); i!=secTypes.end(); ++i)
if (*i == secType) break;
if (i == secTypes.end())
throw Exception("Requested security type not available");
@ -408,10 +408,9 @@ void SConnection::announceClipboard(bool available)
void SConnection::writeFakeColourMap(void)
{
int i;
rdr::U16 red[256], green[256], blue[256];
for (i = 0;i < 256;i++)
for (int i = 0;i < 256;i++)
cp.pf().rgbFromPixel(i, &red[i], &green[i], &blue[i]);
writer()->writeSetColourMapEntries(0, 256, red, green, blue);

View File

@ -68,7 +68,7 @@ namespace rfb {
// later, after queryConnection() has returned. It can only be called when
// in state RFBSTATE_QUERYING. On rejection, an AuthFailureException is
// thrown, so this must be handled appropriately by the caller.
void approveConnection(bool accept, const char* reason=0);
void approveConnection(bool accept, const char* reason=nullptr);
// Overridden from SMsgHandler
@ -155,7 +155,7 @@ namespace rfb {
// authenticated() returns true if the client has authenticated
// successfully.
bool authenticated() { return (state_ == RFBSTATE_INITIALISATION ||
bool authenticated() const { return (state_ == RFBSTATE_INITIALISATION ||
state_ == RFBSTATE_NORMAL); }
// throwConnFailedException() prints a message to the log, sends a conn
@ -211,7 +211,7 @@ namespace rfb {
std::vector<binaryClipboard_t> binaryClipboard;
private:
void writeFakeColourMap(void);
void writeFakeColourMap();
bool readyForSetColourMapEntries;

View File

@ -101,6 +101,7 @@ namespace rfb {
virtual void subscribeUnixRelay(const char *name) = 0;
virtual void unixRelay(const char *name, const rdr::U8 *buf, const unsigned len) = 0;
virtual void videoEncodersRequest(const std::vector<int32_t> &encoders) = 0;
ConnParams cp;
};

View File

@ -41,10 +41,6 @@ SMsgReader::SMsgReader(SMsgHandler* handler_, rdr::InStream* is_)
{
}
SMsgReader::~SMsgReader()
{
}
void SMsgReader::readClientInit()
{
bool shared = is->readU8();
@ -53,7 +49,7 @@ void SMsgReader::readClientInit()
void SMsgReader::readMsg()
{
int msgType = is->readU8();
const int msgType = is->readU8();
switch (msgType) {
case msgTypeSetPixelFormat:
readSetPixelFormat();
@ -106,6 +102,9 @@ void SMsgReader::readMsg()
case msgTypeUnixRelay:
readUnixRelay();
break;
case msgTypeVideoEncoders:
readVideoEncodersRequest();
break;
case msgTypeKeepAlive:
readKeepAlive();
break;
@ -413,3 +412,13 @@ void SMsgReader::readUnixRelay()
handler->unixRelay(name, buf, len);
}
void SMsgReader::readVideoEncodersRequest() const {
const auto len = is->readU8();
std::vector<int32_t> buf(len);
for (int i = 0; i < len; ++i)
buf[i] = is->readU32();
handler->videoEncodersRequest(buf);
}

View File

@ -32,7 +32,7 @@ namespace rfb {
class SMsgReader {
public:
SMsgReader(SMsgHandler* handler, rdr::InStream* is);
virtual ~SMsgReader();
virtual ~SMsgReader() = default;
void readClientInit();
@ -68,6 +68,7 @@ namespace rfb {
void readSubscribeUnixRelay();
void readUnixRelay();
void readVideoEncodersRequest() const;
SMsgHandler* handler;
rdr::InStream* is;

View File

@ -1,38 +1,33 @@
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved.
* Copyright (C) 2011 D. R. Commander. All Rights Reserved.
* Copyright 2009-2017 Pierre Ossman for Cendio AB
*
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include <stdio.h>
#include <string>
#include <rdr/OutStream.h>
#include <rdr/MemOutStream.h>
#include <rdr/ZlibOutStream.h>
#include <rfb/msgTypes.h>
#include <rfb/fenceTypes.h>
#include <rfb/clipboardTypes.h>
#include <rfb/Exception.h>
#include <rfb/ConnParams.h>
#include <rfb/UpdateTracker.h>
#include <rfb/Encoder.h>
#include <rfb/SMsgWriter.h>
#include <rfb/Exception.h>
#include <rfb/LogWriter.h>
#include <rfb/SMsgWriter.h>
#include <rfb/UpdateTracker.h>
#include <rfb/encoders/EncoderConfiguration.h>
#include <rfb/fenceTypes.h>
#include <rfb/ledStates.h>
#include <rfb/msgTypes.h>
using namespace rfb;
@ -50,10 +45,6 @@ SMsgWriter::SMsgWriter(ConnParams* cp_, rdr::OutStream* os_, rdr::OutStream* udp
{
}
SMsgWriter::~SMsgWriter()
{
}
void SMsgWriter::writeServerInit()
{
os->writeU16(cp->width);
@ -778,6 +769,37 @@ void SMsgWriter::writeUnixRelay(const char *name, const rdr::U8 *buf, const unsi
endMsg();
}
void SMsgWriter::writeVideoEncoders(const std::vector<int32_t> &encoders) {
startMsg(msgTypeVideoEncoders);
std::vector<int32_t> conjunction;
for (const auto encoder: cp->available_encoders) {
if (std::find(encoders.begin(), encoders.end(), KasmVideoEncoders::to_streaming_mode(encoder)) != encoders.end()) {
conjunction.push_back(KasmVideoEncoders::to_encoding(encoder));
}
}
const uint8_t size = conjunction.size();
os->writeU8(size);
for (auto encoder: conjunction) {
os->writeS32(encoder);
const auto &config = EncoderConfiguration::get_configuration(KasmVideoEncoders::from_encoding(encoder));
os->writeS32(config.min_quality);
os->writeS32(config.max_quality);
os->writeU8(config.presets.size());
for (const auto &preset_value: config.presets) {
os->writeS32(preset_value);
}
}
endMsg();
}
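
For reference, the reply written above has a simple layout: a U8 count of offered encoders, then per encoder an S32 encoding id, S32 minimum and maximum quality, a U8 preset count, and that many S32 preset values. A minimal client-side decode could look like the sketch below (illustrative only; `EncoderOffer` and `readVideoEncodersReply` are names invented for this sketch, and only the `readU8()`/`readU32()` stream calls already used by `SMsgReader` in this patch are relied on):

```cpp
// Illustrative sketch: decoding the msgTypeVideoEncoders reply written above.
#include <cstdint>
#include <vector>
#include <rdr/InStream.h>

struct EncoderOffer {
    int32_t encoding;
    int32_t min_quality;
    int32_t max_quality;
    std::vector<int32_t> presets;
};

static std::vector<EncoderOffer> readVideoEncodersReply(rdr::InStream *is) {
    std::vector<EncoderOffer> offers(is->readU8());
    for (auto &offer : offers) {
        offer.encoding    = static_cast<int32_t>(is->readU32());
        offer.min_quality = static_cast<int32_t>(is->readU32());
        offer.max_quality = static_cast<int32_t>(is->readU32());
        offer.presets.resize(is->readU8());
        for (auto &preset : offer.presets)
            preset = static_cast<int32_t>(is->readU32());
    }
    return offers;
}
```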
void SMsgWriter::writeUserJoinedSession(const std::string& username)
{
startMsg(msgTypeUserAddedToSession);

View File

@ -25,7 +25,6 @@
#include <string>
#include <rdr/types.h>
#include <rfb/encodings.h>
#include <rfb/ScreenSet.h>
#include <rfb/SConnection.h>
#include <vector>
@ -40,7 +39,7 @@ namespace rfb {
class SMsgWriter {
public:
SMsgWriter(ConnParams* cp, rdr::OutStream* os, rdr::OutStream *udps);
virtual ~SMsgWriter();
virtual ~SMsgWriter() = default;
// writeServerInit() must only be called at the appropriate time in the
// protocol initialisation.
@ -132,6 +131,7 @@ namespace rfb {
void writeSubscribeUnixRelay(const bool success, const char *msg);
void writeUnixRelay(const char *name, const rdr::U8 *buf, const unsigned len);
void writeVideoEncoders(const std::vector<int32_t> &encoders);
void writeUserJoinedSession(const std::string& username);
void writeUserLeftSession(const std::string& username);

View File

@ -1,15 +1,15 @@
/* Copyright 2009 Pierre Ossman for Cendio AB
*
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
@ -21,131 +21,143 @@
#ifndef __RFB_SCREENSET_INCLUDED__
#define __RFB_SCREENSET_INCLUDED__
#include <stdio.h>
#include <string.h>
#include <cstdio>
#include <cstring>
#include <vector>
#include <algorithm>
#include <rdr/types.h>
#include <rfb/Rect.h>
#include <list>
#include <set>
namespace rfb {
// rfb::Screen
//
// Represents a single RFB virtual screen, which includes
// coordinates, an id and flags.
// rfb::Screen
//
// Represents a single RFB virtual screen, which includes
// coordinates, an id and flags.
struct Screen {
Screen(void) : id(0), flags(0) {};
Screen(rdr::U32 id_, int x_, int y_, int w_, int h_, rdr::U32 flags_) :
id(id_), dimensions(x_, y_, x_+w_, y_+h_), flags(flags_) {};
struct Screen {
Screen() = default;
Screen(uint32_t id_, int x_, int y_, int w_, int h_, uint32_t flags_) :
id(id_), dimensions(x_, y_, x_ + w_, y_ + h_), flags(flags_) {};
inline bool operator==(const Screen& r) const {
if (id != r.id)
return false;
if (!dimensions.equals(r.dimensions))
return false;
if (flags != r.flags)
return false;
return true;
}
bool operator==(const Screen &r) const {
if (id != r.id)
return false;
if (!dimensions.equals(r.dimensions))
return false;
if (flags != r.flags)
return false;
return true;
}
rdr::U32 id;
Rect dimensions;
rdr::U32 flags;
};
// rfb::ScreenSet
//
// Represents a complete screen configuration, excluding framebuffer
// dimensions.
struct ScreenSet {
ScreenSet(void) {};
typedef std::list<Screen>::iterator iterator;
typedef std::list<Screen>::const_iterator const_iterator;
inline iterator begin(void) { return screens.begin(); };
inline const_iterator begin(void) const { return screens.begin(); };
inline iterator end(void) { return screens.end(); };
inline const_iterator end(void) const { return screens.end(); };
inline int num_screens(void) const { return screens.size(); };
inline void add_screen(const Screen screen) { screens.push_back(screen); };
inline void remove_screen(rdr::U32 id) {
std::list<Screen>::iterator iter, nextiter;
for (iter = screens.begin();iter != screens.end();iter = nextiter) {
nextiter = iter; nextiter++;
if (iter->id == id)
screens.erase(iter);
}
}
inline bool validate(int fb_width, int fb_height) const {
std::list<Screen>::const_iterator iter;
std::set<rdr::U32> seen_ids;
Rect fb_rect;
if (screens.empty())
return false;
if (num_screens() > 255)
return false;
fb_rect.setXYWH(0, 0, fb_width, fb_height);
for (iter = screens.begin();iter != screens.end();++iter) {
if (iter->dimensions.is_empty())
return false;
if (!iter->dimensions.enclosed_by(fb_rect))
return false;
if (seen_ids.find(iter->id) != seen_ids.end())
return false;
seen_ids.insert(iter->id);
}
return true;
uint32_t id{};
Rect dimensions;
uint32_t flags{};
};
inline void print(char* str, size_t len) const {
char buffer[128];
std::list<Screen>::const_iterator iter;
snprintf(buffer, sizeof(buffer), "%d screen(s)\n", num_screens());
str[0] = '\0';
strncat(str, buffer, len - 1 - strlen(str));
for (iter = screens.begin();iter != screens.end();++iter) {
snprintf(buffer, sizeof(buffer),
" %10d (0x%08x): %dx%d+%d+%d (flags 0x%08x)\n",
(int)iter->id, (unsigned)iter->id,
iter->dimensions.width(), iter->dimensions.height(),
iter->dimensions.tl.x, iter->dimensions.tl.y,
(unsigned)iter->flags);
strncat(str, buffer, len - 1 - strlen(str));
}
// rfb::ScreenSet
//
// Represents a complete screen configuration, excluding framebuffer
// dimensions.
struct ScreenSet {
static constexpr int MAX_SCREENS = 255;
ScreenSet() = default;
using iterator = std::vector<Screen>::iterator;
using const_iterator = std::vector<Screen>::const_iterator;
iterator begin() {
return screens.begin();
}
[[nodiscard]] const_iterator begin() const {
return screens.begin();
}
iterator end() {
return screens.end();
}
[[nodiscard]] const_iterator end() const {
return screens.end();
}
[[nodiscard]] int num_screens() const {
return static_cast<int>(screens.size());
}
void add_screen(const Screen &screen) {
screens.push_back(screen);
std::sort(screens.begin(), screens.end(), compare_screen);
}
void remove_screen(rdr::U32 id) {
//std::erase_if(screens, [id](const Screen &screen) { return screen.id == id; });
screens.erase(std::remove_if(screens.begin(), screens.end(), [id](const Screen &screen) { return screen.id == id; }), screens.end());
}
[[nodiscard]] bool validate(int fb_width, int fb_height) const {
std::set<uint32_t> seen_ids;
Rect fb_rect;
if (screens.empty())
return false;
if (num_screens() > MAX_SCREENS)
return false;
fb_rect.setXYWH(0, 0, fb_width, fb_height);
for (const auto &screen: screens) {
if (screen.dimensions.is_empty())
return false;
if (!screen.dimensions.enclosed_by(fb_rect))
return false;
if (seen_ids.contains(screen.id))
return false;
seen_ids.insert(screen.id);
}
return true;
};
void print(char *str, size_t len) const {
char buffer[128];
snprintf(buffer, sizeof(buffer), "%d screen(s)\n", num_screens());
str[0] = '\0';
strncat(str, buffer, len - 1 - strlen(str));
for (auto &screen: screens) {
snprintf(buffer,
sizeof(buffer),
" %10d (0x%08x): %dx%d+%d+%d (flags 0x%08x)\n",
static_cast<int>(screen.id),
static_cast<unsigned>(screen.id),
screen.dimensions.width(),
screen.dimensions.height(),
screen.dimensions.tl.x,
screen.dimensions.tl.y,
static_cast<unsigned>(screen.flags));
strncat(str, buffer, len - 1 - strlen(str));
}
};
bool operator==(const ScreenSet &r) const {
auto a = screens;
//std::sort(a.begin(), a.end(), compare_screen);
auto b = r.screens;
//std::sort(b.begin(), b.end(), compare_screen);
return a == b;
}
bool operator!=(const ScreenSet &r) const {
return !operator==(r);
}
std::vector<Screen> screens;
private:
static bool compare_screen(const Screen &first, const Screen &second) {
return first.id < second.id;
}
};
inline bool operator==(const ScreenSet& r) const {
std::list<Screen> a = screens;
a.sort(compare_screen);
std::list<Screen> b = r.screens;
b.sort(compare_screen);
return a == b;
};
inline bool operator!=(const ScreenSet& r) const { return !operator==(r); }
std::list<Screen> screens;
private:
static inline bool compare_screen(const Screen& first, const Screen& second)
{
return first.id < second.id;
}
};
};
}; // namespace rfb
#endif
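
Since `ScreenSet` now keeps its screens in a sorted `std::vector`, the public surface (`add_screen`, `remove_screen`, `validate`, `print`, equality) behaves as before. A self-contained usage sketch, with made-up ids and geometry:

```cpp
// Illustrative only: exercising the ScreenSet API shown above.
#include <cstdio>
#include <rfb/ScreenSet.h>

int main() {
    rfb::ScreenSet layout;
    // Two side-by-side 1920x1080 screens; ids and flags are arbitrary here.
    layout.add_screen(rfb::Screen(1, 0, 0, 1920, 1080, 0));
    layout.add_screen(rfb::Screen(2, 1920, 0, 1920, 1080, 0));

    // validate() rejects empty sets, out-of-bounds screens and duplicate ids.
    if (!layout.validate(3840, 1080))
        return 1;

    char buf[1024];
    layout.print(buf, sizeof(buf));
    std::fputs(buf, stdout);

    layout.remove_screen(2);
    return layout.num_screens() == 1 ? 0 : 1;
}
```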

View File

@ -262,7 +262,18 @@ rfb::BoolParameter rfb::Server::printVideoArea
("PrintVideoArea",
"Print the detected video area % value.",
false);
rfb::IntParameter rfb::Server::videoQualityCRFCQP
("VideoQualityCRFCQP",
"The CRF/CPQ value to use when encoding video",
17, 0, 63);
rfb::IntParameter rfb::Server::groupOfPicture
("GroupOfPicture",
"The number of frames to group together for encoding",
24, 0, 100);
rfb::StringParameter rfb::Server::driNode
("drinode",
"Path to the hardware acceleration device (e.g. /dev/dri/renderD128)",
"");
rfb::StringParameter rfb::Server::kasmPasswordFile
("KasmPasswordFile",
"Password file for BasicAuth, created with the kasmvncpasswd utility.",
@ -287,6 +298,11 @@ rfb::IntParameter rfb::Server::udpPort
"Which port to use for UDP. Default same as websocket",
0, 0, 65535);
rfb::StringParameter rfb::Server::videoCodec
("videoCodec",
"If set, use this codec to send a video stream for WebCodecs. Supported options: auto, h264, h264_vaapi, h265, h265_vaapi, av1, av1_vaapi",
"");
static void bandwidthPreset() {
rfb::Server::dynamicQualityMin.setParam(2);
rfb::Server::dynamicQualityMax.setParam(9);

View File

@ -68,11 +68,15 @@ namespace rfb {
static IntParameter videoOutTime;
static IntParameter videoArea;
static IntParameter videoScaling;
static IntParameter videoQualityCRFCQP;
static IntParameter groupOfPicture;
static StringParameter driNode;
static IntParameter udpFullFrameFrequency;
static IntParameter udpPort;
static StringParameter kasmPasswordFile;
static StringParameter publicIP;
static StringParameter stunServer;
static StringParameter videoCodec;
static BoolParameter printVideoArea;
static BoolParameter protocol3_3;
static BoolParameter alwaysShared;

View File

@ -62,11 +62,7 @@ TightEncoder::TightEncoder(SConnection* conn) :
setCompressLevel(-1);
}
TightEncoder::~TightEncoder()
{
}
bool TightEncoder::isSupported()
bool TightEncoder::isSupported() const
{
return conn->cp.supportsEncoding(encodingTight);
}

View File

@ -29,16 +29,16 @@ namespace rfb {
class TightEncoder : public Encoder {
public:
TightEncoder(SConnection* conn);
virtual ~TightEncoder();
~TightEncoder() override = default;
virtual bool isSupported();
bool isSupported() const override;
virtual void setCompressLevel(int level);
void setCompressLevel(int level) override;
virtual void writeRect(const PixelBuffer* pb, const Palette& palette);
virtual void writeSolidRect(int width, int height,
void writeRect(const PixelBuffer* pb, const Palette& palette) override;
void writeSolidRect(int width, int height,
const PixelFormat& pf,
const rdr::U8* colour);
const rdr::U8* colour) override;
void writeWatermarkRect(const rdr::U8 *data, const unsigned len,
const rdr::U8 r,
const rdr::U8 g,

View File

@ -76,11 +76,7 @@ TightJPEGEncoder::TightJPEGEncoder(SConnection* conn) :
{
}
TightJPEGEncoder::~TightJPEGEncoder()
{
}
bool TightJPEGEncoder::isSupported()
bool TightJPEGEncoder::isSupported() const
{
if (!conn->cp.supportsEncoding(encodingTight))
return false;

View File

@ -30,22 +30,22 @@ namespace rfb {
class TightJPEGEncoder : public Encoder {
public:
TightJPEGEncoder(SConnection* conn);
virtual ~TightJPEGEncoder();
~TightJPEGEncoder() override = default;
virtual bool isSupported();
bool isSupported() const override;
virtual void setQualityLevel(int level);
virtual void setFineQualityLevel(int quality, int subsampling);
void setQualityLevel(int level) override;
void setFineQualityLevel(int quality, int subsampling) override;
virtual bool treatLossless();
bool treatLossless() override;
virtual void writeRect(const PixelBuffer* pb, const Palette& palette);
void writeRect(const PixelBuffer* pb, const Palette& palette) override;
virtual void compressOnly(const PixelBuffer* pb, const uint8_t quality,
std::vector<uint8_t> &out, const bool lowVideoQuality) const;
virtual void writeOnly(const std::vector<uint8_t> &out) const;
virtual void writeSolidRect(int width, int height,
void writeSolidRect(int width, int height,
const PixelFormat& pf,
const rdr::U8* colour);
const rdr::U8* colour) override;
protected:
void writeCompact(rdr::U32 value, rdr::OutStream* os) const;

View File

@ -156,11 +156,7 @@ TightQOIEncoder::TightQOIEncoder(SConnection* conn) :
{
}
TightQOIEncoder::~TightQOIEncoder()
{
}
bool TightQOIEncoder::isSupported()
bool TightQOIEncoder::isSupported() const
{
if (!conn->cp.supportsEncoding(encodingTight))
return false;

View File

@ -27,17 +27,17 @@ namespace rfb {
class TightQOIEncoder : public Encoder {
public:
TightQOIEncoder(SConnection* conn);
virtual ~TightQOIEncoder();
~TightQOIEncoder() override = default;
virtual bool isSupported();
bool isSupported() const override;
virtual void writeRect(const PixelBuffer* pb, const Palette& palette);
void writeRect(const PixelBuffer* pb, const Palette& palette) override;
virtual void compressOnly(const PixelBuffer* pb, const uint8_t quality,
std::vector<uint8_t> &out, const bool lowVideoQuality) const;
virtual void writeOnly(const std::vector<uint8_t> &out) const;
virtual void writeSolidRect(int width, int height,
void writeSolidRect(int width, int height,
const PixelFormat& pf,
const rdr::U8* colour);
const rdr::U8* colour) override;
protected:
void writeCompact(rdr::U32 value, rdr::OutStream* os) const;

View File

@ -90,7 +90,7 @@ TightWEBPEncoder::~TightWEBPEncoder()
{
}
bool TightWEBPEncoder::isSupported()
bool TightWEBPEncoder::isSupported() const
{
if (!conn->cp.supportsEncoding(encodingTight))
return false;

View File

@ -29,7 +29,7 @@ namespace rfb {
TightWEBPEncoder(SConnection* conn);
virtual ~TightWEBPEncoder();
virtual bool isSupported();
bool isSupported() const override;
virtual void setQualityLevel(int level);
virtual void setFineQualityLevel(int quality, int subsampling);

View File

@ -16,7 +16,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include <network/GetAPI.h>
#include <network/TcpSocket.h>
@ -36,18 +36,19 @@
#define XK_MISCELLANY
#define XK_XKB_KEYS
#include <rfb/keysymdef.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdint.h>
#include <cctype>
#include <cstdlib>
#include <cstdint>
#include <wordexp.h>
#include "encoders/EncoderProbe.h"
#include "kasmpasswd.h"
using namespace rfb;
static LogWriter vlog("VNCSConnST");
static Cursor emptyCursor(0, 0, Point(0, 0), NULL);
static Cursor emptyCursor(0, 0, Point(0, 0), nullptr);
namespace {
const rdr::U32 CLIENT_KEEPALIVE_KEYSYM = 1;
@ -57,19 +58,19 @@ extern rfb::BoolParameter disablebasicauth;
extern "C" char unixrelaynames[MAX_UNIX_RELAYS][MAX_UNIX_RELAY_NAME_LEN];
VNCSConnectionST::VNCSConnectionST(VNCServerST* server_, network::Socket *s,
VNCSConnectionST::VNCSConnectionST(VNCServerST* server_, network::Socket *s, const video_encoders::EncoderProbe &encoder_probe,
bool reverse)
: upgradingToUdp(false), sock(s), reverseConnection(reverse),
inProcessMessages(false),
pendingSyncFence(false), syncFence(false), fenceFlags(0),
fenceDataLen(0), fenceData(NULL), congestionTimer(this),
fenceDataLen(0), fenceData(nullptr), congestionTimer(this),
losslessTimer(this), kbdLogTimer(this), binclipTimer(this),
server(server_), updates(false),
updateRenderedCursor(false), removeRenderedCursor(false),
continuousUpdates(false), encodeManager(this, &VNCServerST::encCache),
continuousUpdates(false), encodeManager(this, &VNCServerST::encCache, FFmpeg::get(), encoder_probe),
needsPermCheck(false), pointerEventTime(0),
clientHasCursor(false),
accessRights(AccessDefault), startTime(time(0)), frameTracking(false),
accessRights(AccessDefault), startTime(time(nullptr)), frameTracking(false),
udpFramesSinceFull(0), complainedAboutNoViewRights(false), clientUsername("username_unavailable")
{
setStreams(&sock->inStream(), &sock->outStream());
@ -77,11 +78,7 @@ VNCSConnectionST::VNCSConnectionST(VNCServerST* server_, network::Socket *s,
VNCServerST::connectionsLog.write(1,"accepted: %s", peerEndpoint.buf);
memset(bstats_total, 0, sizeof(bstats_total));
gettimeofday(&connStart, NULL);
unsigned i;
for (i = 0; i < MAX_UNIX_RELAYS; i++)
unixRelaySubscriptions[i][0] = '\0';
gettimeofday(&connStart, nullptr);
// Check their permissions, if applicable
kasmpasswdpath[0] = '\0';
@ -93,9 +90,11 @@ VNCSConnectionST::VNCSConnectionST(VNCServerST* server_, network::Socket *s,
user[0] = '\0';
const char *at = strrchr(peerEndpoint.buf, '@');
if (at && at - peerEndpoint.buf > 1 && at - peerEndpoint.buf < USERNAME_LEN) {
memcpy(user, peerEndpoint.buf, at - peerEndpoint.buf);
user[at - peerEndpoint.buf] = '\0';
// strrchr() may return nullptr; guard the pointer arithmetic
const auto offset = at ? at - peerEndpoint.buf : 0;
if (at && offset > 1 && static_cast<size_t>(offset) < USERNAME_LEN) {
memcpy(user, peerEndpoint.buf, offset);
user[offset] = '\0';
}
bool read, write, owner;
@ -111,10 +110,12 @@ VNCSConnectionST::VNCSConnectionST(VNCServerST* server_, network::Socket *s,
// Configure the socket
setSocketTimeouts();
lastEventTime = time(0);
gettimeofday(&lastRealUpdate, NULL);
gettimeofday(&lastClipboardOp, NULL);
gettimeofday(&lastKeyEvent, NULL);
lastEventTime = time(nullptr);
gettimeofday(&lastRealUpdate, nullptr);
gettimeofday(&lastClipboardOp, nullptr);
gettimeofday(&lastKeyEvent, nullptr);
cp.available_encoders = encoder_probe.get_available_encoders();
server->clients.push_front(this);
@ -144,7 +145,7 @@ VNCSConnectionST::~VNCSConnectionST()
}
if (server->pointerClient == this)
server->pointerClient = 0;
server->pointerClient = nullptr;
// Remove this client from the server
server->clients.remove(this);
@ -180,7 +181,7 @@ void VNCSConnectionST::close(const char* reason)
vlog.debug("second close: %s (%s)", peerEndpoint.buf, reason);
if (authenticated()) {
server->lastDisconnectTime = time(0);
server->lastDisconnectTime = time(nullptr);
// First update the client state to CLOSING to ensure it's not included in user lists
setState(RFBSTATE_CLOSING);
@ -544,7 +545,7 @@ int VNCSConnectionST::checkIdleTimeout()
if (idleTimeout == 0) return 0;
if (state() != RFBSTATE_NORMAL && idleTimeout < 15)
idleTimeout = 15; // minimum of 15 seconds while authenticating
time_t now = time(0);
time_t now = time(nullptr);
if (now < lastEventTime) {
// Someone must have set the time backwards. Set lastEventTime so that the
// idleTimeout will count from now.
@ -632,7 +633,7 @@ bool VNCSConnectionST::needRenderedCursor()
!cp.supportsLocalCursor && !cp.supportsLocalXCursor)
return true;
if (!server->cursorPos.equals(pointerEventPos) &&
(time(0) - pointerEventTime) > 0)
(time(nullptr) - pointerEventTime) > 0)
return true;
return false;
@ -1520,7 +1521,7 @@ void VNCSConnectionST::writeDataUpdate()
// Does the client need a server-side rendered cursor?
cursor = NULL;
cursor = nullptr;
if (needRenderedCursor()) {
Rect renderedCursorRect;
@ -1559,7 +1560,7 @@ void VNCSConnectionST::writeDataUpdate()
msSince(&lastRealUpdate) < losslessThreshold))
return;
writeRTTPing();
// writeRTTPing();
// FIXME: If continuous updates aren't used then the client might
// be slower than frameRate in its requests and we could
@ -1570,9 +1571,9 @@ void VNCSConnectionST::writeDataUpdate()
server->msToNextUpdate() / 1000;
if (!ui.is_empty()) {
encodeManager.writeUpdate(ui, server->getPixelBuffer(), cursor, maxUpdateSize);
encodeManager.writeUpdate(ui, server->screenLayout, server->getPixelBuffer(), cursor, maxUpdateSize);
copypassed.clear();
gettimeofday(&lastRealUpdate, NULL);
gettimeofday(&lastRealUpdate, nullptr);
losslessTimer.start(losslessThreshold);
const unsigned ms = encodeManager.getEncodingTime();
@ -1596,11 +1597,11 @@ void VNCSConnectionST::writeDataUpdate()
bstats_total[BS_CPU_CLOSE]++;
}
} else {
encodeManager.writeLosslessRefresh(req, server->getPixelBuffer(),
encodeManager.writeLosslessRefresh(req, server->screenLayout, server->getPixelBuffer(),
cursor, maxUpdateSize);
}
writeRTTPing();
// writeRTTPing();
// The request might be for just part of the screen, so we cannot
// just clear the entire update tracker.
@ -1622,7 +1623,7 @@ void VNCSConnectionST::writeBinaryClipboard()
writer()->writeBinaryClipboard(binaryClipboard);
gettimeofday(&lastClipboardOp, NULL);
gettimeofday(&lastClipboardOp, nullptr);
}
void VNCSConnectionST::screenLayoutChange(rdr::U16 reason)
@ -1912,6 +1913,10 @@ void VNCSConnectionST::unixRelay(const char *name, const rdr::U8 *buf, const uns
}
}
void VNCSConnectionST::videoEncodersRequest(const std::vector<int32_t> &encoders) {
writer()->writeVideoEncoders(encoders);
}
void VNCSConnectionST::sendUnixRelayData(const char name[], const unsigned char *buf,
const unsigned len)
{

View File

@ -35,6 +35,7 @@
#include <rfb/Timer.h>
#include <rfb/unixRelayLimits.h>
#include <rfb/encoders/EncoderProbe.h>
#include "kasmpasswd.h"
namespace rfb {
@ -43,7 +44,7 @@ namespace rfb {
class VNCSConnectionST : public SConnection,
public Timer::Callback {
public:
VNCSConnectionST(VNCServerST* server_, network::Socket* s, bool reverse);
VNCSConnectionST(VNCServerST* server_, network::Socket* s, const video_encoders::EncoderProbe &encoder_probe, bool reverse);
virtual ~VNCSConnectionST();
// Methods called from VNCServerST. None of these methods ever knowingly
@ -258,12 +259,13 @@ namespace rfb {
virtual void udpUpgrade(const char *resp);
virtual void subscribeUnixRelay(const char *name);
virtual void unixRelay(const char *name, const rdr::U8 *buf, const unsigned len);
void videoEncodersRequest(std::vector<int32_t> const &encoders) override;
virtual void supportsLocalCursor();
virtual void supportsFence();
virtual void supportsContinuousUpdates();
virtual void supportsLEDState();
virtual bool canChangeKasmSettings() const {
bool canChangeKasmSettings() const override {
return (accessRights & (AccessPtrEvents | AccessKeyEvents)) ==
(AccessPtrEvents | AccessKeyEvents);
}
@ -361,7 +363,7 @@ namespace rfb {
bool frameTracking;
uint32_t udpFramesSinceFull;
char unixRelaySubscriptions[MAX_UNIX_RELAYS][MAX_UNIX_RELAY_NAME_LEN];
char unixRelaySubscriptions[MAX_UNIX_RELAYS][MAX_UNIX_RELAY_NAME_LEN] = {};
bool complainedAboutNoViewRights;
std::string clientUsername;
};

View File

@ -71,11 +71,15 @@
#include <arpa/inet.h>
#include <fcntl.h>
#include <filesystem>
#include <string_view>
#include <sys/inotify.h>
#include <unistd.h>
#include <wordexp.h>
#include <filesystem>
#include <string_view>
#include <fmt/core.h>
#include "encoders/KasmVideoConstants.h"
#include "encoders/EncoderProbe.h"
using namespace rfb;
@ -131,7 +135,7 @@ static void parseRegionPart(const bool percents, rdr::U16 &pcdest, int &dest,
*inptr = ptr;
}
VNCServerST::VNCServerST(const char* name_, SDesktop* desktop_)
VNCServerST::VNCServerST(const char* name_, SDesktop* desktop_, const video_encoders::EncoderProbe &encoder_probe_)
: blHosts(&blacklist), desktop(desktop_), desktopStarted(false),
blockCounter(0), pb(nullptr), blackedpb(nullptr), ledState(ledUnknown),
name(strDup(name_)), pointerClient(nullptr), clipboardClient(nullptr),
@ -140,7 +144,7 @@ VNCServerST::VNCServerST(const char* name_, SDesktop* desktop_)
queryConnectionHandler(nullptr), keyRemapper(&KeyRemapper::defInstance),
lastConnectionTime(0), disableclients(false),
frameTimer(this), apimessager(nullptr), trackingFrameStats(0),
clipboardId(0), sendWatermark(false)
clipboardId(0), sendWatermark(false), encoder_probe(encoder_probe_)
{
auto to_string = [](const bool value) {
return value ? "yes" : "no";
@ -154,6 +158,19 @@ VNCServerST::VNCServerST(const char* name_, SDesktop* desktop_)
to_string(cpu_info::has_sse4_2),
to_string(cpu_info::has_avx512f));
std::string available_accelerators{};
for (const auto encoder: encoder_probe.get_available_encoders()) {
if (KasmVideoEncoders::is_accelerated(encoder)) {
if (!available_accelerators.empty())
available_accelerators.append(", ");
available_accelerators.append(KasmVideoEncoders::to_string(encoder));
}
}
slog.info("Hardware video encoding acceleration capability: %s",
available_accelerators.empty() ? "none" : available_accelerators.c_str());
DLPRegion.enabled = DLPRegion.percents = false;
if (Server::DLP_Region[0]) {
@ -221,14 +238,14 @@ VNCServerST::VNCServerST(const char* name_, SDesktop* desktop_)
if (kasmpasswdpath[0] && access(kasmpasswdpath, R_OK) == 0) {
// Set up a watch on the password file
inotifyfd = inotify_init();
if (inotifyfd < 0)
inotify_fd = inotify_init();
if (inotify_fd < 0)
slog.error("Failed to init inotify");
int flags = fcntl(inotifyfd, F_GETFL, 0);
fcntl(inotifyfd, F_SETFL, flags | O_NONBLOCK);
int flags = fcntl(inotify_fd, F_GETFL, 0);
fcntl(inotify_fd, F_SETFL, flags | O_NONBLOCK);
if (inotify_add_watch(inotifyfd, kasmpasswdpath, IN_CLOSE_WRITE | IN_DELETE_SELF) < 0)
if (inotify_add_watch(inotify_fd, kasmpasswdpath, IN_CLOSE_WRITE | IN_DELETE_SELF) < 0)
slog.error("Failed to set watch");
}
@ -241,9 +258,9 @@ VNCServerST::VNCServerST(const char* name_, SDesktop* desktop_)
SelfBench();
if (Server::benchmark[0]) {
auto *file_name = Server::benchmark.getValueStr();
const auto *file_name = Server::benchmark.getValueStr();
if (!std::filesystem::exists(file_name))
throw Exception("Benchmarking video file does not exist");
throw std::invalid_argument("Benchmarking video file does not exist");
benchmark(file_name, Server::benchmarkResults.getValueStr());
}
}
@ -298,7 +315,7 @@ void VNCServerST::addSocket(network::Socket* sock, bool outgoing)
lastConnectionTime = time(0);
}
VNCSConnectionST* client = new VNCSConnectionST(this, sock, outgoing);
VNCSConnectionST* client = new VNCSConnectionST(this, sock, encoder_probe, outgoing);
client->init();
if (watermarkData)
@ -1062,16 +1079,16 @@ void VNCServerST::writeUpdate()
// Check if the password file was updated
bool permcheck = false;
if (inotifyfd >= 0) {
if (inotify_fd >= 0) {
char buf[256];
int ret = read(inotifyfd, buf, 256);
int ret = read(inotify_fd, buf, 256);
int pos = 0;
while (ret > 0) {
const struct inotify_event * const ev = (struct inotify_event *) &buf[pos];
if (ev->mask & IN_IGNORED) {
// file was deleted, set new watch
if (inotify_add_watch(inotifyfd, kasmpasswdpath, IN_CLOSE_WRITE | IN_DELETE_SELF) < 0)
if (inotify_add_watch(inotify_fd, kasmpasswdpath, IN_CLOSE_WRITE | IN_DELETE_SELF) < 0)
slog.error("Failed to set watch");
}

View File

@ -26,15 +26,17 @@
#include <sys/time.h>
#include <rfb/EncCache.h>
#include <rfb/SDesktop.h>
#include <rfb/VNCServer.h>
#include <rfb/LogWriter.h>
#include <network/Socket.h>
#include <rfb/Blacklist.h>
#include <rfb/Cursor.h>
#include <rfb/Timer.h>
#include <network/Socket.h>
#include <rfb/EncCache.h>
#include <rfb/LogWriter.h>
#include <rfb/SDesktop.h>
#include <rfb/ScreenSet.h>
#include <rfb/Timer.h>
#include <rfb/VNCServer.h>
#include <rfb/encoders/KasmVideoConstants.h>
#include <rfb/encoders/EncoderProbe.h>
#include <string>
namespace rfb {
@ -52,7 +54,7 @@ namespace rfb {
// -=- Constructors
// Create a server exporting the supplied desktop.
VNCServerST(const char* name_, SDesktop* desktop_);
VNCServerST(const char* name_, SDesktop* desktop_, const video_encoders::EncoderProbe &encoder_probe);
virtual ~VNCServerST();
@ -278,7 +280,7 @@ namespace rfb {
Timer frameTimer;
int inotifyfd;
int inotify_fd{-1};
network::GetAPIMessager *apimessager;
@ -300,6 +302,7 @@ namespace rfb {
rdr::U8 &trackingFrameStats, char trackingClient[]);
bool sendWatermark;
const video_encoders::EncoderProbe &encoder_probe;
};
};

View File

@ -41,7 +41,7 @@ ZRLEEncoder::~ZRLEEncoder()
zos.setUnderlying(NULL);
}
bool ZRLEEncoder::isSupported()
bool ZRLEEncoder::isSupported() const
{
return conn->cp.supportsEncoding(encodingZRLE);
}

View File

@ -28,14 +28,14 @@ namespace rfb {
class ZRLEEncoder : public Encoder {
public:
ZRLEEncoder(SConnection* conn);
virtual ~ZRLEEncoder();
~ZRLEEncoder() override;
virtual bool isSupported();
bool isSupported() const override;
virtual void writeRect(const PixelBuffer* pb, const Palette& palette);
virtual void writeSolidRect(int width, int height,
void writeRect(const PixelBuffer* pb, const Palette& palette) override;
void writeSolidRect(int width, int height,
const PixelFormat& pf,
const rdr::U8* colour);
const rdr::U8* colour) override;
protected:
void writePaletteTile(const Rect& tile, const PixelBuffer* pb,

View File

@ -0,0 +1,144 @@
/* Copyright (C) 2025 Kasm Technologies Corp
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include "FfmpegFrameFeeder.h"
#include <filesystem>
#include <string_view>
FfmpegFrameFeeder::FfmpegFrameFeeder(FFmpeg *ffmpeg_) : ffmpeg{ffmpeg_} {}
FfmpegFrameFeeder::~FfmpegFrameFeeder() {
ffmpeg->avcodec_close(codec_ctx_guard.get());
}
void FfmpegFrameFeeder::open(const std::string_view path) {
AVFormatContext *format_ctx{};
if (ffmpeg->avformat_open_input(&format_ctx, path.data(), nullptr, nullptr) < 0)
throw std::runtime_error("Could not open video file");
format_ctx_guard.reset(format_ctx);
// Find stream info
if (ffmpeg->avformat_find_stream_info(format_ctx, nullptr) < 0)
throw std::runtime_error("Could not find stream info");
// Find video stream
for (uint32_t i = 0; i < format_ctx->nb_streams; ++i) {
if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_idx = static_cast<int>(i);
break;
}
}
if (video_stream_idx == -1)
throw std::runtime_error("No video stream found");
// Get codec parameters and decoder
const auto *codec_parameters = format_ctx->streams[video_stream_idx]->codecpar;
const auto *codec = ffmpeg->avcodec_find_decoder(codec_parameters->codec_id);
if (!codec)
throw std::runtime_error("Codec not found");
auto *codec_ctx = ffmpeg->avcodec_alloc_context3(codec);
if (!codec_ctx || ffmpeg->avcodec_parameters_to_context(codec_ctx, codec_parameters) < 0)
throw std::runtime_error("Failed to set up codec context");
codec_ctx_guard.reset(codec_ctx);
if (ffmpeg->avcodec_open2(codec_ctx, codec, nullptr) < 0)
throw std::runtime_error("Could not open codec");
}
FfmpegFrameFeeder::play_stats_t FfmpegFrameFeeder::play(benchmarking::MockTestConnection *connection) const {
// Allocate frame and packet
const FFmpeg::FrameGuard frame{ffmpeg->av_frame_alloc()};
const FFmpeg::PacketGuard packet{ffmpeg->av_packet_alloc()};
if (!frame || !packet)
throw std::runtime_error("Could not allocate frame or packet");
// Scaling context to convert to RGB24
auto *sws_ctx = ffmpeg->sws_getContext(codec_ctx_guard->width,
codec_ctx_guard->height,
codec_ctx_guard->pix_fmt,
codec_ctx_guard->width,
codec_ctx_guard->height,
AV_PIX_FMT_RGB24,
SWS_BILINEAR,
nullptr,
nullptr,
nullptr);
if (!sws_ctx)
throw std::runtime_error("Could not create scaling context");
const std::unique_ptr<SwsContext, void (*)(SwsContext *)> sws_ctx_guard(sws_ctx, ffmpeg->sws_freeContext);
const FFmpeg::FrameGuard rgb_frame{ffmpeg->av_frame_alloc()};
if (!rgb_frame)
throw std::runtime_error("Could not allocate frame");
rgb_frame->format = AV_PIX_FMT_RGB24;
rgb_frame->width = codec_ctx_guard->width;
rgb_frame->height = codec_ctx_guard->height;
if (ffmpeg->av_frame_get_buffer(rgb_frame.get(), 0) != 0)
throw std::runtime_error("Could not allocate frame data");
play_stats_t stats{};
const auto total_frame_count = get_total_frame_count();
stats.timings.reserve(total_frame_count > 0 ? total_frame_count : 2048);
auto *codec_ctx = codec_ctx_guard.get();
while (ffmpeg->av_read_frame(format_ctx_guard.get(), packet.get()) == 0) {
if (packet->stream_index == video_stream_idx) {
if (ffmpeg->avcodec_send_packet(codec_ctx, packet.get()) == 0) {
while (ffmpeg->avcodec_receive_frame(codec_ctx, frame.get()) == 0) {
// Convert to RGB
if (ffmpeg->sws_scale(sws_ctx_guard.get(),
frame->data,
frame->linesize,
0,
frame->height,
rgb_frame->data,
rgb_frame->linesize) < 0)
throw std::runtime_error("Could not scale frame");
connection->framebufferUpdateStart();
connection->setNewFrame(rgb_frame.get());
using namespace std::chrono;
auto now = high_resolution_clock::now();
connection->framebufferUpdateEnd();
const auto duration = duration_cast<milliseconds>(high_resolution_clock::now() - now).count();
// vlog.info("Frame took %lu ms", duration);
stats.total += duration;
stats.timings.push_back(duration);
}
}
}
ffmpeg->av_packet_unref(packet.get());
}
if (ffmpeg->av_seek_frame(format_ctx_guard.get(), video_stream_idx, 0, AVSEEK_FLAG_BACKWARD) < 0)
throw std::runtime_error("Could not seek to start of video");
ffmpeg->avcodec_flush_buffers(codec_ctx);
return stats;
}

View File

@ -0,0 +1,41 @@
#pragma once
#include <vector>
#include "benchmark.h"
#include "rfb/LogWriter.h"
#include "rfb/ffmpeg.h"
class FfmpegFrameFeeder final {
rfb::LogWriter vlog{"FFmpeg"};
FFmpeg *ffmpeg{};
FFmpeg::ContextGuard codec_ctx_guard{};
FFmpeg::FormatCtxGuard format_ctx_guard{};
int video_stream_idx{-1};
public:
explicit FfmpegFrameFeeder(FFmpeg *ffmpeg);
~FfmpegFrameFeeder();
void open(std::string_view path);
[[nodiscard]] int64_t get_total_frame_count() const { return format_ctx_guard->streams[video_stream_idx]->nb_frames; }
struct frame_dimensions_t
{
int width{};
int height{};
};
[[nodiscard]] frame_dimensions_t get_frame_dimensions() const { return {codec_ctx_guard->width, codec_ctx_guard->height}; }
struct play_stats_t
{
uint64_t frames{};
uint64_t total{};
std::vector<uint64_t> timings;
};
play_stats_t play(benchmarking::MockTestConnection *connection) const;
};
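
A condensed sketch of how the feeder is driven, mirroring the benchmark driver further below (illustrative only; the helper name and the clip path are placeholders):

```cpp
// Illustrative sketch of driving FfmpegFrameFeeder.
#include <stdexcept>
#include <string_view>
#include "FfmpegFrameFeeder.h"

static double averageFrameMs(benchmarking::MockTestConnection *connection,
                             const std::string_view path) {
    auto &ffmpeg = FFmpeg::get();
    if (!ffmpeg.is_available())
        throw std::runtime_error("FFmpeg is not available");

    FfmpegFrameFeeder feeder{&ffmpeg};
    feeder.open(path);

    const auto [width, height] = feeder.get_frame_dimensions();
    (void) width; (void) height; // normally used to size the framebuffer

    const auto stats = feeder.play(connection);
    return stats.timings.empty()
               ? 0.0
               : static_cast<double>(stats.total) / static_cast<double>(stats.timings.size());
}
```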

View File

@ -19,25 +19,22 @@
*/
#include "benchmark.h"
#include <string_view>
#include <rfb/LogWriter.h>
#include <numeric>
#include <tinyxml2.h>
#include <algorithm>
#include <cassert>
#include "ServerCore.h"
#include <cmath>
#include "EncCache.h"
#include "EncodeManager.h"
#include "SConnection.h"
#include "screenTypes.h"
#include "SMsgWriter.h"
#include "UpdateTracker.h"
#include "rdr/BufferedInStream.h"
#include "rdr/OutStream.h"
#include "ffmpeg.h"
#include <numeric>
#include <rdr/BufferedInStream.h>
#include <rdr/OutStream.h>
#include <rfb/EncCache.h>
#include <rfb/EncodeManager.h>
#include <rfb/SConnection.h>
#include <rfb/SMsgWriter.h>
#include <rfb/UpdateTracker.h>
#include <rfb/screenTypes.h>
#include <string_view>
#include <tinyxml2.h>
#include "FfmpegFrameFeeder.h"
#include "rfb/LogWriter.h"
namespace benchmarking {
class MockBufferStream final : public rdr::BufferedInStream {
@ -87,49 +84,43 @@ namespace benchmarking {
~MockSConnection() override = default;
void writeUpdate(const rfb::UpdateInfo &ui, const rfb::PixelBuffer *pb) {
void writeUpdate(const rfb::UpdateInfo &ui, const ScreenSet &layout, const rfb::PixelBuffer *pb) {
cache.clear();
manager.clearEncodingTime();
if (!ui.is_empty()) {
manager.writeUpdate(ui, pb, nullptr);
manager.writeUpdate(ui, layout, pb, nullptr);
} else {
rfb::Region region{pb->getRect()};
manager.writeLosslessRefresh(region, pb, nullptr, 2000);
manager.writeLosslessRefresh(region, layout, pb, nullptr, 2000);
}
}
void setDesktopSize(int fb_width, int fb_height,
const rfb::ScreenSet &layout) override {
void setDesktopSize(int fb_width, int fb_height, const rfb::ScreenSet &layout) override {
cp.width = fb_width;
cp.height = fb_height;
cp.screenLayout = layout;
writer()->writeExtendedDesktopSize(rfb::reasonServer, 0, cp.width, cp.height,
cp.screenLayout);
writer()->writeExtendedDesktopSize(rfb::reasonServer, 0, cp.width, cp.height, cp.screenLayout);
}
void sendStats(const bool toClient) override {
}
void sendStats(const bool toClient) override {}
[[nodiscard]] bool canChangeKasmSettings() const override {
return true;
}
void udpUpgrade(const char *resp) override {
}
void udpUpgrade(const char *resp) override {}
void udpDowngrade(const bool) override {
}
void udpDowngrade(const bool) override {}
void subscribeUnixRelay(const char *name) override {
}
void subscribeUnixRelay(const char *name) override {}
void unixRelay(const char *name, const rdr::U8 *buf, const unsigned len) override {
}
void unixRelay(const char *name, const rdr::U8 *buf, const unsigned len) override {}
void handleFrameStats(rdr::U32 all, rdr::U32 render) override {
}
void videoEncodersRequest(const std::vector<int32_t> &encoders) override {}
void handleFrameStats(rdr::U32 all, rdr::U32 render) override {}
[[nodiscard]] auto getJpegStats() const {
return manager.jpegstats;
@ -139,15 +130,19 @@ namespace benchmarking {
return manager.webpstats;
}
[[nodiscard]] auto bytes() { return out.length(); }
[[nodiscard]] auto udp_bytes() { return udps.length(); }
[[nodiscard]] auto bytes() {
return out.length();
}
[[nodiscard]] auto udp_bytes() {
return udps.length();
}
protected:
MockStream out{};
MockStream udps{};
EncCache cache{};
EncodeManager manager{this, &cache};
EncodeManager manager{this, &cache, FFmpeg::get(), video_encoders::EncoderProbe::get(FFmpeg::get(), {}, nullptr)};
};
class MockCConnection final : public MockTestConnection {
@ -173,8 +168,7 @@ namespace benchmarking {
}
void setCursor(int width, int height, const rfb::Point &hotspot, const rdr::U8 *data,
const bool resizing) override {
}
const bool resizing) override {}
~MockCConnection() override = default;
@ -238,23 +232,18 @@ namespace benchmarking {
updates.add_changed(pb->getRect());
updates.getUpdateInfo(&ui, clip);
sc.writeUpdate(ui, pb);
sc.writeUpdate(ui, screen_layout, pb);
}
void dataRect(const rfb::Rect &r, int encoding) override {
}
void dataRect(const rfb::Rect &r, int encoding) override {}
void setColourMapEntries(int, int, rdr::U16 *) override {
}
void setColourMapEntries(int, int, rdr::U16 *) override {}
void bell() override {
}
void bell() override {}
void serverCutText(const char *, rdr::U32) override {
}
void serverCutText(const char *, rdr::U32) override {}
void serverCutText(const char *str) override {
}
void serverCutText(const char *str) override {}
protected:
MockBufferStream in;
@ -262,10 +251,10 @@ namespace benchmarking {
rfb::SimpleUpdateTracker updates;
MockSConnection sc;
};
}
} // namespace benchmarking
void report(std::vector<uint64_t> &totals, std::vector<uint64_t> &timings,
std::vector<benchmarking::MockCConnection::stats_t> &stats, const std::string_view results_file) {
const std::vector<benchmarking::MockCConnection::stats_t> &stats, const std::string_view results_file) {
auto totals_sum = std::accumulate(totals.begin(), totals.end(), 0.);
auto totals_avg = totals_sum / static_cast<double>(totals.size());
@ -354,11 +343,15 @@ void report(std::vector<uint64_t> &totals, std::vector<uint64_t> &timings,
void benchmark(std::string_view path, const std::string_view results_file) {
try {
vlog.info("Benchmarking with video file %s", path.data());
FFmpegFrameFeeder frame_feeder{};
auto &ffmpeg = FFmpeg::get();
if (!ffmpeg.is_available())
throw std::runtime_error("FFmpeg is not available");
FfmpegFrameFeeder frame_feeder{&ffmpeg};
frame_feeder.open(path);
static const rfb::PixelFormat pf{32, 24, false, true, 0xFF, 0xFF, 0xFF, 0, 8, 16};
std::vector<rdr::S32> encodings{
const std::vector<rdr::S32> encodings{
std::begin(benchmarking::default_encodings), std::end(benchmarking::default_encodings)
};

View File

@ -18,9 +18,9 @@
#pragma once
#include "CConnection.h"
#include "CMsgReader.h"
#include "LogWriter.h"
#include <rfb/CConnection.h>
#include <rfb/CMsgReader.h>
#include <rfb/LogWriter.h>
extern "C" {
#include <libavutil/frame.h>

View File

@ -21,21 +21,21 @@
namespace rfb {
// Formats
const unsigned int clipboardUTF8 = 1 << 0;
const unsigned int clipboardRTF = 1 << 1;
const unsigned int clipboardHTML = 1 << 2;
const unsigned int clipboardDIB = 1 << 3;
const unsigned int clipboardFiles = 1 << 4;
constexpr unsigned int clipboardUTF8 = 1 << 0;
constexpr unsigned int clipboardRTF = 1 << 1;
constexpr unsigned int clipboardHTML = 1 << 2;
constexpr unsigned int clipboardDIB = 1 << 3;
constexpr unsigned int clipboardFiles = 1 << 4;
const unsigned int clipboardFormatMask = 0x0000ffff;
constexpr unsigned int clipboardFormatMask = 0x0000ffff;
// Actions
const unsigned int clipboardCaps = 1 << 24;
const unsigned int clipboardRequest = 1 << 25;
const unsigned int clipboardPeek = 1 << 26;
const unsigned int clipboardNotify = 1 << 27;
const unsigned int clipboardProvide = 1 << 28;
constexpr unsigned int clipboardCaps = 1 << 24;
constexpr unsigned int clipboardRequest = 1 << 25;
constexpr unsigned int clipboardPeek = 1 << 26;
constexpr unsigned int clipboardNotify = 1 << 27;
constexpr unsigned int clipboardProvide = 1 << 28;
const unsigned int clipboardActionMask = 0xff000000;
constexpr unsigned int clipboardActionMask = 0xff000000;
}
#endif

View File

@ -0,0 +1,63 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include "EncoderConfiguration.h"
namespace rfb {
static inline std::array<EncoderConfiguration, static_cast<size_t>(KasmVideoEncoders::Encoder::unavailable) + 1> EncoderConfigurations =
{
// AV1
// av1_vaapi
EncoderConfiguration{0, 0, {}},
// av1_ffmpeg_vaapi
EncoderConfiguration{0, 0, {}},
// av1_nvenc
EncoderConfiguration{0, 0, {}},
// av1_software
EncoderConfiguration{0, 0, {}},
// H.265
// h265_vaapi
EncoderConfiguration{0, 51, {18, 23, 28, 39, 51}},
// h265_ffmpeg_vaapi
EncoderConfiguration{0, 51, {18, 23, 28, 39, 51}},
// h265_nvenc
EncoderConfiguration{0, 51, {18, 23, 28, 39, 51}},
// h265_software
EncoderConfiguration{0, 51, {18, 23, 28, 39, 51}},
// H.264
// h264_vaapi
EncoderConfiguration{0, 51, {18, 23, 28, 33, 51}},
// h264_ffmpeg_vaapi
EncoderConfiguration{0, 51, {12, 16, 25, 39, 51}},
// h264_nvenc
EncoderConfiguration{0, 51, {18, 23, 28, 39, 51}},
// h264_software
EncoderConfiguration{1, 51, {9, 18, 25, 39, 51}},
EncoderConfiguration{}
};
// Compile-time check: EncoderConfigurations must have one entry per Encoder enum value, including the trailing `unavailable` sentinel
static_assert(EncoderConfigurations.size() == static_cast<size_t>(KasmVideoEncoders::Encoder::unavailable) + 1,
"EncoderConfigurations size must match the KasmVideoEncoders::Encoder enum count.");
const EncoderConfiguration &EncoderConfiguration::get_configuration(KasmVideoEncoders::Encoder encoder) {
return EncoderConfigurations[static_cast<uint8_t>(encoder)];
}
} // namespace rfb
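
The table above is indexed directly by the encoder enum via get_configuration. A minimal lookup sketch, assuming the caller picks the middle preset (the index choice and the clamp are illustrative, not part of this change):

#include <algorithm>
#include "EncoderConfiguration.h"
// Somewhere in an encoder setup path (illustrative): pick the middle CRF preset for
// software H.264 and keep it inside the codec's valid range.
const auto &cfg = rfb::EncoderConfiguration::get_configuration(rfb::KasmVideoEncoders::Encoder::h264_software);
const rdr::S32 qp = std::clamp(cfg.presets[2], cfg.min_quality, cfg.max_quality); // 25 for libx264 per the table above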

View File

@ -0,0 +1,44 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#pragma once
#include <array>
#include <cstdint>
#include <string>
#include <variant>
#include "KasmVideoEncoders.h"
#include "rdr/types.h"
namespace rfb {
struct EncoderConfiguration {
struct CodecOption {
std::string name;
std::variant<rdr::S32, std::string> value;
};
static constexpr uint8_t MAX_PRESETS = 5;
rdr::S32 min_quality{};
rdr::S32 max_quality{};
// std::vector<CodecOption> codecOptions{};
std::array<rdr::S32, MAX_PRESETS> presets{};
static const EncoderConfiguration &get_configuration(KasmVideoEncoders::Encoder encoder);
};
} // namespace rfb

View File

@ -0,0 +1,161 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include "EncoderProbe.h"
#include <fcntl.h>
#include <string>
#include <unistd.h>
#include <vector>
#include "KasmVideoConstants.h"
#include <rfb/LogWriter.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
#include <libavutil/opt.h>
}
#include "rfb/ffmpeg.h"
namespace rfb::video_encoders {
static LogWriter vlog("EncoderProbe");
struct EncoderCandidate {
KasmVideoEncoders::Encoder encoder;
AVCodecID codec_id;
AVHWDeviceType hw_type;
};
static std::array<EncoderCandidate, 6> candidates = {
{
//{KasmVideoEncoders::Encoder::h264_nvenc, AV_CODEC_ID_H264, AV_HWDEVICE_TYPE_VAAPI}
//{KasmVideoEncoders::Encoder::av1_vaapi, AV_CODEC_ID_AV1, AV_HWDEVICE_TYPE_VAAPI},
//{KasmVideoEncoders::Encoder::hevc_vaapi, AV_CODEC_ID_HEVC, AV_HWDEVICE_TYPE_VAAPI}, // h265
EncoderCandidate{KasmVideoEncoders::Encoder::h264_ffmpeg_vaapi, AV_CODEC_ID_H264, AV_HWDEVICE_TYPE_VAAPI},
// EncoderCandidate{KasmVideoEncoders::Encoder::h264_software, AV_CODEC_ID_H264, AV_HWDEVICE_TYPE_NONE}
//{KasmVideoEncoders::Encoder::av1_software, AV_CODEC_ID_AV1, AV_HWDEVICE_TYPE_NONE},
//{KasmVideoEncoders::Encoder::h265_software, AV_CODEC_ID_HEVC, AV_HWDEVICE_TYPE_NONE},
}
};
EncoderProbe::EncoderProbe(FFmpeg &ffmpeg_, const std::vector<std::string_view> &parsed_encoders, const char *dri_node) :
ffmpeg(ffmpeg_) {
if (!ffmpeg.is_available()) {
available_encoders.push_back(KasmVideoEncoders::Encoder::unavailable);
} else {
auto debug_encoders = [] (const char *msg, const KasmVideoEncoders::Encoders &encoders) {
std::string encoder_names;
for (const auto encoder: encoders)
encoder_names.append(KasmVideoEncoders::to_string(encoder)).append(" ");
if (!encoder_names.empty())
vlog.debug("%s: %s", msg, encoder_names.c_str());
};
const auto encoders = SupportedVideoEncoders::map_encoders(parsed_encoders);
debug_encoders("CLI-specified video codecs", encoders);
available_encoders = probe(dri_node);
debug_encoders("Available encoders", available_encoders);
available_encoders = SupportedVideoEncoders::filter_available_encoders(encoders, available_encoders);
debug_encoders("Using CLI-specified video codecs (supported subset)", available_encoders);
}
available_encoders.shrink_to_fit();
if (available_encoders.empty())
best_encoder = KasmVideoEncoders::Encoder::unavailable;
else
best_encoder = available_encoders.front();
}
KasmVideoEncoders::Encoders EncoderProbe::probe(const char *dri_node) {
KasmVideoEncoders::Encoders result{};
for (const auto &encoder_candidate: candidates) {
const AVCodec *codec = ffmpeg.avcodec_find_encoder_by_name(KasmVideoEncoders::to_string(encoder_candidate.encoder));
if (!codec || codec->type != AVMEDIA_TYPE_VIDEO)
continue;
if (encoder_candidate.hw_type != AV_HWDEVICE_TYPE_NONE) {
if (!ffmpeg.av_codec_is_encoder(codec))
continue;
FFmpeg::BufferGuard hw_ctx_guard;
AVBufferRef *hw_ctx{};
if (dri_node) {
const auto err = ffmpeg.av_hwdevice_ctx_create(&hw_ctx, encoder_candidate.hw_type, dri_node, nullptr, 0);
if (err == 0) {
hw_ctx_guard.reset(hw_ctx);
drm_device_path = dri_node;
result.push_back(encoder_candidate.encoder);
} else
vlog.error("%s", ffmpeg.get_error_description(err).c_str());
} else {
vlog.debug("Trying to open all DRM devices");
for (const auto *drm_dev_path: drm_device_paths) {
const auto err = ffmpeg.av_hwdevice_ctx_create(&hw_ctx, encoder_candidate.hw_type, drm_dev_path, nullptr, 0);
if (err < 0) {
vlog.error("%s", ffmpeg.get_error_description(err).c_str());
continue;
}
hw_ctx_guard.reset(hw_ctx);
drm_device_path = drm_dev_path;
vlog.info("Found DRM device %s", drm_dev_path);
if (encoder_candidate.hw_type == AV_HWDEVICE_TYPE_VAAPI) {
vlog.debug("DEBUG: Codec: %s\n", codec->name);
const FFmpeg::ContextGuard ctx_guard{ffmpeg.avcodec_alloc_context3(codec)};
const AVOption *opt{};
while (opt = ffmpeg.av_opt_next(ctx_guard->priv_data, opt), opt) {
vlog.debug("DEBUG: Option: %s.%s (help: %s)\n", codec->name, opt->name, opt->help ? opt->help : "n/a");
}
}
result.push_back(encoder_candidate.encoder);
break;
}
}
}
}
result.push_back(KasmVideoEncoders::Encoder::h264_software);
result.push_back(KasmVideoEncoders::Encoder::h265_software);
// result.push_back(KasmVideoEncoders::Encoder::av1_software);
return result;
}
/*bool EncoderProbe::is_acceleration_available() {
if (access(render_path, R_OK | W_OK) != 0)
return false;
const int fd = open(render_path, O_RDWR);
if (fd < 0)
return false;
close(fd);
return true;
}*/
} // namespace rfb::video_encoders

View File

@ -0,0 +1,67 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#pragma once
#include <vector>
#include "KasmVideoConstants.h"
#include "SupportedVideoEncoders.h"
#include "rfb/ffmpeg.h"
namespace rfb::video_encoders {
class EncoderProbe {
KasmVideoEncoders::Encoder best_encoder{KasmVideoEncoders::Encoder::h264_software};
KasmVideoEncoders::Encoders available_encoders;
std::string drm_device_path;
FFmpeg &ffmpeg;
explicit EncoderProbe(FFmpeg &ffmpeg, const std::vector<std::string_view> &parsed_encoders, const char *dri_node);
KasmVideoEncoders::Encoders probe(const char *dri_node);
public:
EncoderProbe(const EncoderProbe &) = delete;
EncoderProbe &operator=(const EncoderProbe &) = delete;
EncoderProbe(EncoderProbe &&) = delete;
EncoderProbe &operator=(EncoderProbe &&) = delete;
static EncoderProbe &get(FFmpeg &ffmpeg, const std::vector<std::string_view> &parsed_encoders, const char *dri_node) {
static EncoderProbe instance{ffmpeg, parsed_encoders, dri_node};
return instance;
}
// [[nodiscard]] static bool is_acceleration_available();
[[nodiscard]] KasmVideoEncoders::Encoder get_best_encoder() const {
return best_encoder;
}
[[nodiscard]] const KasmVideoEncoders::Encoders &get_available_encoders() const {
return available_encoders;
}
[[nodiscard]] const KasmVideoEncoders::Encoders &update_encoders(const std::vector<std::string_view> &codecs) {
available_encoders = SupportedVideoEncoders::filter_available_encoders(SupportedVideoEncoders::map_encoders(codecs), available_encoders);
return available_encoders;
}
[[nodiscard]] const char *get_drm_device_path() const {
return drm_device_path.c_str();
}
};
} // namespace rfb::video_encoders
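
A sketch of typical use of the probe singleton; the FFmpeg instance and the codec string are assumptions, not taken from this change:

// 'ffmpeg' is an FFmpeg wrapper obtained elsewhere; probe once, then fall back to the
// JPEG/WebP rect path if nothing is usable.
auto &probe = rfb::video_encoders::EncoderProbe::get(ffmpeg, rfb::SupportedVideoEncoders::parse("h264_vaapi,h264"), nullptr);
if (probe.get_best_encoder() == rfb::KasmVideoEncoders::Encoder::unavailable) {
// no hardware or software video encoder available
}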

View File

@ -0,0 +1,282 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include "FFMPEGVAAPIEncoder.h"
#include <fmt/format.h>
#include <rfb/ServerCore.h>
#include "EncoderProbe.h"
#include "rfb/LogWriter.h"
extern "C" {
#include <libavutil/opt.h>
}
#include "KasmVideoConstants.h"
#include "rfb/encodings.h"
#include <rfb/encoders/utils.h>
static rfb::LogWriter vlog("FFMPEGVAAPIEncoder");
namespace rfb {
FFMPEGVAAPIEncoder::FFMPEGVAAPIEncoder(Screen layout_, const FFmpeg &ffmpeg_, SConnection *conn, KasmVideoEncoders::Encoder encoder_,
const char *dri_node_, VideoEncoderParams params) :
VideoEncoder(layout_.id, conn), layout(layout_),
ffmpeg(ffmpeg_), encoder(encoder_), current_params(params), msg_codec_id(KasmVideoEncoders::to_msg_id(encoder)),
dri_node(dri_node_) {
AVBufferRef *hw_device_ctx{};
int err{};
if (err = ffmpeg.av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, dri_node_, nullptr, 0); err < 0) {
throw std::runtime_error(fmt::format("Failed to create VAAPI device context {}", ffmpeg.get_error_description(err)));
}
hw_device_ctx_guard.reset(hw_device_ctx);
const auto *enc_name = KasmVideoEncoders::to_string(encoder);
codec = ffmpeg.avcodec_find_encoder_by_name(enc_name);
if (!codec)
throw std::runtime_error(fmt::format("Could not find {} encoder", enc_name));
auto *frame = ffmpeg.av_frame_alloc();
if (!frame)
throw std::runtime_error("Cannot allocate AVFrame");
sw_frame_guard.reset(frame);
auto *pkt = ffmpeg.av_packet_alloc();
if (!pkt) {
throw std::runtime_error("Could not allocate packet");
}
pkt_guard.reset(pkt);
}
bool FFMPEGVAAPIEncoder::init(int width, int height, VideoEncoderParams params) {
current_params = params;
AVHWFramesContext *frames_ctx{};
int err{};
vlog.debug("FRAME RESIZE (%d, %d): RATE: %d, GOP: %d, QUALITY: %d", width, height, current_params.frame_rate, current_params.group_of_picture, current_params.quality);
auto *ctx = ffmpeg.avcodec_alloc_context3(codec);
if (!ctx) {
vlog.error("Cannot allocate AVCodecContext");
return false;
}
ctx_guard.reset(ctx);
ctx->time_base = {1, current_params.frame_rate};
ctx->framerate = {current_params.frame_rate, 1};
ctx->gop_size = current_params.group_of_picture; // interval between I-frames
ctx->max_b_frames = 0; // No B-frames for immediate output
ctx->pix_fmt = AV_PIX_FMT_VAAPI;
ctx_guard->width = current_params.width;
ctx_guard->height = current_params.height;
ctx_guard->coded_width = current_params.width;
ctx_guard->coded_height = current_params.height;
ctx->delay = 0;
ctx->flags |= AV_CODEC_FLAG_LOW_DELAY;
if (ffmpeg.av_opt_set(ctx->priv_data, "async_depth", "1", 0) < 0) {
vlog.info("Cannot set async_depth");
}
if (ffmpeg.av_opt_set(ctx->priv_data, "rc_mode", "CQP", 0) < 0) {
vlog.info("Cannot set rc_mode");
}
if (ffmpeg.av_opt_set_int(ctx->priv_data, "qp", current_params.quality, 0) < 0) {
vlog.info("Cannot set qp");
}
auto *hw_frames_ctx = ffmpeg.av_hwframe_ctx_alloc(hw_device_ctx_guard.get());
if (!hw_frames_ctx) {
vlog.error("Failed to create VAAPI frame context");
return false;
}
hw_frames_ref_guard.reset(hw_frames_ctx);
frames_ctx = reinterpret_cast<AVHWFramesContext *>(hw_frames_ctx->data);
frames_ctx->format = AV_PIX_FMT_VAAPI;
frames_ctx->sw_format = AV_PIX_FMT_NV12;
frames_ctx->width = current_params.width;
frames_ctx->height = current_params.height;
frames_ctx->initial_pool_size = 20;
if (err = ffmpeg.av_hwframe_ctx_init(hw_frames_ctx); err < 0) {
vlog.error("Failed to initialize VAAPI frame context (%s). Error code: %d", ffmpeg.get_error_description(err).c_str(), err);
return false;
}
FFmpeg::av_buffer_unref(&ctx_guard->hw_frames_ctx);
ctx_guard->hw_frames_ctx = ffmpeg.av_buffer_ref(hw_frames_ctx);
if (!ctx_guard->hw_frames_ctx) {
vlog.error("Failed to create buffer reference");
return false;
}
auto *frame = ffmpeg.av_frame_alloc();
if (!frame) {
vlog.error("Cannot allocate AVFrame");
return false;
}
sw_frame_guard.reset(frame);
frame->format = AV_PIX_FMT_NV12;
frame->width = params.width;
frame->height = params.height;
frame->pict_type = AV_PICTURE_TYPE_I;
if (ffmpeg.av_frame_get_buffer(frame, 0) < 0) {
vlog.error("Could not allocate sw-frame data");
return false;
}
auto *hw_frame = ffmpeg.av_frame_alloc();
if (!hw_frame) {
vlog.error("Cannot allocate hw AVFrame");
return false;
}
hw_frame_guard.reset(hw_frame);
if (err = ffmpeg.av_hwframe_get_buffer(hw_frames_ctx, hw_frame, 0); err < 0) {
vlog.error("Could not allocate hw-frame data (%s). Error code: %d", ffmpeg.get_error_description(err).c_str(), err);
return false;
}
if (err = ffmpeg.avcodec_open2(ctx_guard.get(), codec, nullptr); err < 0) {
vlog.error("Failed to open codec (%s). Error code: %d", ffmpeg.get_error_description(err).c_str(), err);
return false;
}
auto *sws_ctx = ffmpeg.sws_getContext(
width, height, AV_PIX_FMT_RGB32, params.width, params.height, AV_PIX_FMT_NV12, SWS_BILINEAR, nullptr, nullptr, nullptr);
if (!sws_ctx) {
vlog.error("Could not initialize the conversion context");
return false;
}
sws_guard.reset(sws_ctx);
return true;
}
bool FFMPEGVAAPIEncoder::isSupported() const {
return conn->cp.supportsEncoding(encodingKasmVideo);
}
bool FFMPEGVAAPIEncoder::render(const PixelBuffer *pb) {
// compress
int stride;
const auto rect = layout.dimensions;
const auto *buffer = pb->getBuffer(rect, &stride);
const int width = rect.width();
const int height = rect.height();
auto *frame = sw_frame_guard.get();
int dst_width = width;
int dst_height = height;
if (width % 2 != 0)
dst_width = width & ~1;
if (height % 2 != 0)
dst_height = height & ~1;
VideoEncoderParams params{dst_width,
dst_height,
static_cast<uint8_t>(Server::frameRate),
static_cast<uint8_t>(Server::groupOfPicture),
static_cast<uint8_t>(Server::videoQualityCRFCQP)};
if (current_params != params) {
bpp = pb->getPF().bpp >> 3;
if (!init(width, height, params)) {
vlog.error("Failed to initialize encoder");
return false;
}
frame = sw_frame_guard.get();
} else {
frame->pict_type = AV_PICTURE_TYPE_NONE;
}
const uint8_t *src_data[1] = {buffer};
const int src_line_size[1] = {stride * bpp}; // RGB has bpp bytes per pixel
int err{};
if (err = ffmpeg.sws_scale(sws_guard.get(), src_data, src_line_size, 0, height, frame->data, frame->linesize); err < 0) {
vlog.error("Error (%s) while scaling image. Error code: %d", ffmpeg.get_error_description(err).c_str(), err);
return false;
}
frame->pts = pts++;
if (err = ffmpeg.av_hwframe_transfer_data(hw_frame_guard.get(), frame, 0); err < 0) {
vlog.error(
"Error while transferring frame data to surface (%s). Error code: %d", ffmpeg.get_error_description(err).c_str(), err);
return false;
}
if (err = ffmpeg.avcodec_send_frame(ctx_guard.get(), hw_frame_guard.get()); err < 0) {
vlog.error("Error sending frame to codec (%s). Error code: %d", ffmpeg.get_error_description(err).c_str(), err);
return false;
}
auto *pkt = pkt_guard.get();
err = ffmpeg.avcodec_receive_packet(ctx_guard.get(), pkt);
if (err == AVERROR(EAGAIN) || err == AVERROR_EOF) {
// Trying again
ffmpeg.avcodec_send_frame(ctx_guard.get(), hw_frame_guard.get());
err = ffmpeg.avcodec_receive_packet(ctx_guard.get(), pkt);
}
if (err < 0) {
vlog.error("Error receiving packet from codec");
return false;
}
if (pkt->flags & AV_PKT_FLAG_KEY)
vlog.debug("Key frame %ld", frame->pts);
return true;
}
void FFMPEGVAAPIEncoder::writeRect(const PixelBuffer *pb, const Palette &palette) {
auto *pkt = pkt_guard.get();
auto *os = conn->getOutStream(conn->cp.supportsUdp);
os->writeU8(layout.id);
os->writeU8(msg_codec_id);
os->writeU8(pkt->flags & AV_PKT_FLAG_KEY);
encoders::write_compact(os, pkt->size);
os->writeBytes(&pkt->data[0], pkt->size);
vlog.debug("Screen id %d, codec %d, frame size: %d", layout.id, msg_codec_id, pkt->size);
ffmpeg.av_packet_unref(pkt);
}
void FFMPEGVAAPIEncoder::writeSolidRect(int width, int height, const PixelFormat &pf, const rdr::U8 *colour) {}
void FFMPEGVAAPIEncoder::writeSkipRect() {
auto *os = conn->getOutStream(conn->cp.supportsUdp);
os->writeU8(layout.id);
os->writeU8(kasmVideoSkip);
}
} // namespace rfb

View File

@ -0,0 +1,63 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#pragma once
#include "KasmVideoConstants.h"
#include "rdr/OutStream.h"
#include "rfb/Encoder.h"
#include "rfb/encoders/VideoEncoder.h"
#include "rfb/ffmpeg.h"
namespace rfb {
class FFMPEGVAAPIEncoder final : public VideoEncoder {
Screen layout;
const FFmpeg &ffmpeg;
FFmpeg::FrameGuard sw_frame_guard;
FFmpeg::FrameGuard hw_frame_guard;
FFmpeg::PacketGuard pkt_guard;
FFmpeg::ContextGuard ctx_guard;
FFmpeg::SwsContextGuard sws_guard;
FFmpeg::BufferGuard hw_device_ctx_guard;
FFmpeg::BufferGuard hw_frames_ref_guard;
const AVCodec *codec{};
KasmVideoEncoders::Encoder encoder;
VideoEncoderParams current_params{};
uint8_t msg_codec_id;
int64_t pts{};
int bpp{};
const char *dri_node{};
[[nodiscard]] bool init(int width, int height, VideoEncoderParams params);
template<typename T>
friend class EncoderBuilder;
FFMPEGVAAPIEncoder(Screen layout, const FFmpeg &ffmpeg, SConnection *conn, KasmVideoEncoders::Encoder encoder,
const char *dri_node, VideoEncoderParams params);
public:
bool isSupported() const override;
void writeRect(const PixelBuffer *pb, const Palette &palette) override;
void writeSolidRect(int width, int height, const PixelFormat &pf, const rdr::U8 *colour) override;
bool render(const PixelBuffer *pb) override;
void writeSkipRect() override;
};
} // namespace rfb

View File

@ -0,0 +1,37 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#pragma once
#include <array>
#include <cassert>
namespace rfb {
static constexpr unsigned int SupportedEncoderCount = 3;
// Compression control
static constexpr unsigned int kasmVideoH264 = 0x01 << 4; // H.264 encoding
static constexpr unsigned int kasmVideoH265 = 0x02 << 4; // H.265 encoding
static constexpr unsigned int kasmVideoAV1 = 0x03 << 4; // AV1 encoding
static constexpr unsigned int kasmVideoSkip = 0x00 << 4; // Skip frame
static constexpr auto drm_device_paths = std::to_array<const char *>({
"/dev/dri/renderD128",
"/dev/dri/card0",
"/dev/dri/renderD129",
"/dev/dri/card1"
});
} // namespace rfb

View File

@ -0,0 +1,213 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#pragma once
#include <array>
#include <cstdint>
#include <rfb/encodings.h>
#include <type_traits>
#include <vector>
#include "KasmVideoConstants.h"
template<typename E>
class EnumRange {
public:
using val_t = std::underlying_type_t<E>;
class EnumIterator {
val_t value;
public:
explicit EnumIterator(E v) :
value(static_cast<std::underlying_type_t<E>>(v)) {}
E operator*() const {
return static_cast<E>(value);
}
EnumIterator &operator++() {
++value;
return *this;
}
bool operator!=(const EnumIterator &other) const {
return value != other.value;
}
};
EnumRange(E begin, E end) :
begin_iter(EnumIterator(begin)),
end_iter(++EnumIterator(end)) {}
[[nodiscard]] EnumIterator begin() const {
return begin_iter;
}
[[nodiscard]] EnumIterator end() const {
return end_iter;
}
EnumIterator begin() {
return begin_iter;
}
EnumIterator end() {
return end_iter;
}
private:
EnumIterator begin_iter;
EnumIterator end_iter;
};
template<typename T>
auto enum_range(T begin, T end) {
return EnumRange<T>(begin, end);
}
namespace rfb {
struct KasmVideoEncoders {
// Codecs are ordered by preferred usage quality
enum class Encoder : uint8_t
{
av1_vaapi,
av1_ffmpeg_vaapi,
av1_nvenc,
av1_software,
h265_vaapi, // h265
h265_ffmpeg_vaapi,
h265_nvenc,
h265_software,
h264_vaapi,
h264_ffmpeg_vaapi,
h264_nvenc,
h264_software,
unavailable // Keep this as the last entry - used for compile-time size checks
};
using Encoders = std::vector<Encoder>;
static inline auto EncoderNames = std::to_array<const char *>({"av1_vaapi",
"av1_vaapi",
"av1_nvenc",
"libsvtav1",
"hevc_vaapi",
"hevc_vaapi",
"hevc_nvenc",
"libx265",
"h264_vaapi",
"h264_vaapi",
"h264_nvenc",
"libx264",
"unavailable"});
static_assert(EncoderNames.size() == static_cast<size_t>(Encoder::unavailable) + 1, "EncoderNames array size must match Encoder enum count.");
static inline auto Encodings = std::to_array<int>({pseudoEncodingStreamingModeAV1VAAPI,
pseudoEncodingStreamingModeAV1VAAPI,
pseudoEncodingStreamingModeAV1NVENC,
pseudoEncodingStreamingModeAV1SW,
pseudoEncodingStreamingModeHEVCVAAPI,
pseudoEncodingStreamingModeHEVCVAAPI,
pseudoEncodingStreamingModeHEVCNVENC,
pseudoEncodingStreamingModeHEVCSW,
pseudoEncodingStreamingModeAVCVAAPI,
pseudoEncodingStreamingModeAVCVAAPI,
pseudoEncodingStreamingModeAVCNVENC,
pseudoEncodingStreamingModeAVCSW,
pseudoEncodingStreamingModeJpegWebp});
static_assert(Encodings.size() == static_cast<size_t>(Encoder::unavailable) + 1, "Encodings array size must match Encoder enum count.");
static bool is_accelerated(Encoder encoder) {
return encoder != Encoder::h264_software && encoder != Encoder::h265_software && encoder != Encoder::av1_software;
}
static auto to_string(Encoder encoder) {
return EncoderNames[static_cast<uint8_t>(encoder)];
}
static int to_encoding(Encoder encoder) {
return Encodings[static_cast<uint8_t>(encoder)];
}
static Encoder from_encoding(int encoding) {
for (auto encoder: enum_range(Encoder::av1_vaapi, Encoder::unavailable)) {
if (to_encoding(encoder) == encoding) {
switch (encoder) {
case Encoder::av1_vaapi:
return Encoder::av1_ffmpeg_vaapi;
case Encoder::h265_vaapi:
return Encoder::h265_ffmpeg_vaapi;
case Encoder::h264_vaapi:
return Encoder::h264_ffmpeg_vaapi;
default:
return encoder;
}
}
}
return Encoder::unavailable;
}
static unsigned int to_msg_id(Encoder encoder) {
switch (encoder) {
case Encoder::av1_vaapi:
case Encoder::av1_ffmpeg_vaapi:
case Encoder::av1_nvenc:
case Encoder::av1_software:
return kasmVideoAV1;
case Encoder::h265_vaapi: // h265
case Encoder::h265_ffmpeg_vaapi:
case Encoder::h265_nvenc:
case Encoder::h265_software:
return kasmVideoH265;
case Encoder::h264_vaapi:
case Encoder::h264_ffmpeg_vaapi:
case Encoder::h264_nvenc:
case Encoder::h264_software:
return kasmVideoH264;
default:
assert(false);
return kasmVideoSkip; // unreachable in practice, keeps the non-void function well-formed
}
}
static int32_t to_streaming_mode(Encoder encoder) {
switch (encoder) {
case Encoder::av1_vaapi:
case Encoder::av1_ffmpeg_vaapi:
case Encoder::av1_nvenc:
case Encoder::av1_software:
return pseudoEncodingStreamingModeAV1;
case Encoder::h265_vaapi: // h265
case Encoder::h265_ffmpeg_vaapi:
case Encoder::h265_nvenc:
case Encoder::h265_software:
return pseudoEncodingStreamingModeHEVC;
case Encoder::h264_vaapi:
case Encoder::h264_ffmpeg_vaapi:
case Encoder::h264_nvenc:
case Encoder::h264_software:
return pseudoEncodingStreamingModeAVC;
default:
return pseudoEncodingStreamingModeJpegWebp;
}
}
};
} // namespace rfb
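
EnumRange treats its end value as inclusive (the constructor pre-increments the end iterator), which is how from_encoding above can iterate through unavailable. An illustrative loop over the concrete codecs, stopping before unavailable so to_msg_id's assert is never hit:

#include <cstdio>
// e.g. in a debugging helper: list every real encoder with its ffmpeg name and wire codec id.
for (auto enc: enum_range(rfb::KasmVideoEncoders::Encoder::av1_vaapi, rfb::KasmVideoEncoders::Encoder::h264_software))
printf("%s -> 0x%02x\n", rfb::KasmVideoEncoders::to_string(enc), rfb::KasmVideoEncoders::to_msg_id(enc));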

View File

@ -0,0 +1,226 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include "ScreenEncoderManager.h"
#include <cassert>
#include <rfb/LogWriter.h>
#include <rfb/Region.h>
#include <rfb/SMsgWriter.h>
#include <rfb/encodings.h>
#include <sys/stat.h>
#include <tbb/parallel_for_each.h>
#include "VideoEncoder.h"
#include "VideoEncoderFactory.h"
namespace rfb {
static LogWriter vlog("ScreenEncoderManager");
template<uint8_t T>
ScreenEncoderManager<T>::ScreenEncoderManager(const FFmpeg &ffmpeg_, KasmVideoEncoders::Encoder encoder,
const std::vector<KasmVideoEncoders::Encoder> &encoders, SConnection *conn, const char *dri_node_, VideoEncoderParams params) :
Encoder(conn, encodingKasmVideo, static_cast<EncoderFlags>(EncoderUseNativePF | EncoderLossy), -1),
ffmpeg(ffmpeg_),
current_params(params),
base_video_encoder(encoder),
available_encoders(encoders),
dri_node(dri_node_) {
screens_to_refresh.reserve(T);
}
template<uint8_t T>
ScreenEncoderManager<T>::~ScreenEncoderManager() {
// Iterate over every slot: count shrinks as screens are removed, so it cannot drive the loop.
for (uint8_t i = 0; i < T; ++i)
remove_screen(i);
}
template<uint8_t T>
VideoEncoder *ScreenEncoderManager<T>::add_encoder(const Screen &layout) const {
VideoEncoder *encoder{};
try {
encoder = create_encoder(layout, &ffmpeg, conn, base_video_encoder, dri_node, current_params);
} catch (const std::exception &e) {
if (base_video_encoder != KasmVideoEncoders::Encoder::h264_software) {
vlog.error("Attempting fallback to software encoder due to error: %s", e.what());
try {
encoder = create_encoder(layout, &ffmpeg, conn, KasmVideoEncoders::Encoder::h264_software, nullptr, current_params);
} catch (const std::exception &exception) {
vlog.error("Failed to create software encoder: %s", exception.what());
}
} else
vlog.error("Failed to create software encoder: %s", e.what());
}
return encoder;
}
template<uint8_t T>
bool ScreenEncoderManager<T>::add_screen(uint8_t index, const Screen &layout) {
auto *encoder = add_encoder(layout);
if (!encoder)
return false;
mask |= 1ULL << index;
screens[index] = {layout, encoder, true};
head = std::min(head, index);
++count;
rebuild_screens_to_refresh();
return true;
}
template<uint8_t T>
size_t ScreenEncoderManager<T>::get_screen_count() const {
return count;
}
template<uint8_t T>
void ScreenEncoderManager<T>::remove_screen(uint8_t index) {
if (screens[index].encoder) {
delete screens[index].encoder;
screens[index].encoder = nullptr;
mask &= ~(1ULL << index);
--count;
rebuild_screens_to_refresh();
}
screens[index] = {};
}
template<uint8_t T>
void ScreenEncoderManager<T>::rebuild_screens_to_refresh() {
screens_to_refresh.clear();
uint64_t remaining_mask = mask;
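// Walk the set bits: __builtin_ctzll yields the index of the lowest set bit,
// and remaining_mask &= remaining_mask - 1 clears exactly that bit each pass.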
while (remaining_mask) {
const auto pos = __builtin_ctzll(remaining_mask);
if (screens[pos].dirty)
screens_to_refresh.push_back(pos);
remaining_mask &= remaining_mask - 1;
}
}
template<uint8_t T>
ScreenEncoderManager<T>::stats_t ScreenEncoderManager<T>::get_stats() const {
return stats;
}
template<uint8_t T>
bool ScreenEncoderManager<T>::sync_layout(const ScreenSet &layout, const Region &region) {
const auto bounds = region.get_bounding_rect();
for (uint8_t i = 0; i < static_cast<uint8_t>(layout.num_screens()); ++i) {
const auto &screen = layout.screens[i];
auto id = screen.id;
if (id >= T) {
vlog.error("Screen id %u is out of range, falling back to screen 0", id);
id = 0;
}
if (!screens[id].layout.dimensions.equals(screen.dimensions)) {
remove_screen(id);
if (!add_screen(id, screen))
return false;
}
if (screen.dimensions.overlaps(bounds)) {
screens[id].dirty = true;
}
}
return true;
}
template<uint8_t T>
bool ScreenEncoderManager<T>::isSupported() const {
if (const auto *encoder = screens[head].encoder; encoder)
return encoder->isSupported();
return false;
}
template<uint8_t T>
void ScreenEncoderManager<T>::writeRect(const PixelBuffer *pb, const Palette &palette) {
// if (!pb) {
// vlog.error("writeRect called with null PixelBuffer");
// return;
// }
if (screens_to_refresh.empty())
return;
const auto bpp = conn->cp.pf().bpp >> 3;
auto *out_conn = conn->getOutStream(conn->cp.supportsUdp);
if (!out_conn) {
vlog.error("writeRect: getOutStream returned NULL");
return;
}
const auto send_frame = [this, &bpp, out_conn, pb, &palette](const screen_t &screen) {
++stats.rects;
const auto &rect = screen.layout.dimensions;
const auto area = rect.area();
stats.pixels += area;
const auto before = out_conn->length();
const int equiv = 12 + (area * bpp);
stats.equivalent += equiv;
const auto &encoder = screen.encoder;
conn->writer()->startRect(rect, encoder->encoding);
encoder->writeRect(pb, palette);
conn->writer()->endRect();
const auto after = out_conn->length();
stats.bytes += after - before;
};
if (screens_to_refresh.size() > 1) {
tbb::parallel_for_each(screens_to_refresh.begin(), screens_to_refresh.end(), [this, pb, &send_frame](uint8_t index) {
auto &screen = screens[index];
if (auto *encoder = screen.encoder; encoder) {
screen.dirty = encoder->render(pb);
}
});
for (auto index: screens_to_refresh) {
auto &screen = screens[index];
if (screen.dirty) {
send_frame(screen);
screen.dirty = false;
}
}
} else {
if (auto encoder = screens[head].encoder; encoder) {
if (encoder->render(pb))
send_frame(screens[head]);
}
}
}
template<uint8_t T>
void ScreenEncoderManager<T>::writeSolidRect(int width, int height, const PixelFormat &pf, const rdr::U8 *colour) {
for (const auto index: screens_to_refresh) {
if (auto *encoder = screens[index].encoder; encoder)
encoder->writeSolidRect(width, height, pf, colour);
}
}
} // namespace rfb

View File

@ -0,0 +1,111 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#pragma once
#include <tbb/spin_mutex.h>
#include <vector>
#include "KasmVideoConstants.h"
#include "VideoEncoder.h"
#include "rfb/Encoder.h"
#include "rfb/ffmpeg.h"
inline constexpr uint8_t MAX_SCREENS = 8;
namespace rfb {
template<uint8_t T = MAX_SCREENS>
class ScreenEncoderManager final : public Encoder {
static_assert(
T <= std::numeric_limits<uint64_t>::digits, "ScreenEncoderManager mask should be changed as current mask supports T <= 64");
struct screen_t {
Screen layout{};
VideoEncoder *encoder{};
bool dirty{};
};
uint8_t head{};
uint8_t count{};
uint64_t mask{};
std::vector<uint8_t> screens_to_refresh;
tbb::spin_mutex conn_mutex;
std::array<screen_t, T> screens{};
const FFmpeg &ffmpeg;
VideoEncoderParams current_params;
KasmVideoEncoders::Encoder base_video_encoder;
std::vector<KasmVideoEncoders::Encoder> available_encoders;
const char *dri_node{};
[[nodiscard]] VideoEncoder *add_encoder(const Screen &layout) const;
bool add_screen(uint8_t index, const Screen &layout);
[[nodiscard]] size_t get_screen_count() const;
void remove_screen(uint8_t index);
void rebuild_screens_to_refresh();
public:
struct stats_t {
uint64_t rects{};
uint64_t pixels{};
uint64_t bytes{};
uint64_t equivalent{};
};
[[nodiscard]] stats_t get_stats() const;
// Iterator
using iterator = typename std::array<screen_t, T>::iterator;
using const_iterator = typename std::array<screen_t, T>::const_iterator;
iterator begin() {
return screens.begin();
}
iterator end() {
return screens.end();
}
[[nodiscard]] const_iterator cbegin() const {
return screens.begin();
}
[[nodiscard]] const_iterator cend() const {
return screens.end();
}
explicit ScreenEncoderManager(const FFmpeg &ffmpeg_, KasmVideoEncoders::Encoder encoder,
const std::vector<KasmVideoEncoders::Encoder> &encoders, SConnection *conn, const char *dri_node, VideoEncoderParams params);
~ScreenEncoderManager() override;
ScreenEncoderManager(const ScreenEncoderManager &) = delete;
ScreenEncoderManager &operator=(const ScreenEncoderManager &) = delete;
ScreenEncoderManager(ScreenEncoderManager &&) = delete;
ScreenEncoderManager &operator=(ScreenEncoderManager &&) = delete;
bool sync_layout(const ScreenSet &layout, const Region &region);
[[nodiscard]] KasmVideoEncoders::Encoder get_encoder() const {
return base_video_encoder;
}
// Encoder
[[nodiscard]] bool isSupported() const override;
void writeRect(const PixelBuffer *pb, const Palette &palette) override;
void writeSolidRect(int width, int height, const PixelFormat &pf, const rdr::U8 *colour) override;
private:
stats_t stats{};
};
template class ScreenEncoderManager<>;
} // namespace rfb

View File

@ -0,0 +1,271 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include "SoftwareEncoder.h"
extern "C" {
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
}
#include "KasmVideoConstants.h"
#include <rfb/LogWriter.h>
#include <rfb/SConnection.h>
#include <rfb/ServerCore.h>
#include <rfb/encodings.h>
#include <rfb/ffmpeg.h>
#include <fmt/format.h>
#include <rfb/encoders/utils.h>
static rfb::LogWriter vlog("SoftwareEncoder");
namespace rfb {
SoftwareEncoder::SoftwareEncoder(Screen layout_, const FFmpeg &ffmpeg_, SConnection *conn, KasmVideoEncoders::Encoder encoder_,
VideoEncoderParams params) :
VideoEncoder(layout_.id, conn), layout(layout_),
ffmpeg(ffmpeg_), encoder(encoder_), current_params(params), msg_codec_id(KasmVideoEncoders::to_msg_id(encoder)) {
const auto *enc_name = KasmVideoEncoders::to_string(encoder);
codec = ffmpeg.avcodec_find_encoder_by_name(enc_name);
if (!codec)
throw std::runtime_error(fmt::format("Could not find {} encoder", enc_name));
auto *frame = ffmpeg.av_frame_alloc();
if (!frame) {
throw std::runtime_error("Cannot allocate AVFrame");
}
frame_guard.reset(frame);
auto *pkt = ffmpeg.av_packet_alloc();
if (!pkt)
throw std::runtime_error("Could not allocate packet");
pkt_guard.reset(pkt);
}
bool SoftwareEncoder::isSupported() const {
return conn->cp.supportsEncoding(encodingKasmVideo);
}
bool SoftwareEncoder::render(const PixelBuffer *pb) {
// compress
int stride;
const auto rect = layout.dimensions;
const auto *buffer = pb->getBuffer(rect, &stride);
const int width = rect.width();
const int height = rect.height();
auto *frame = frame_guard.get();
int dst_width = width;
int dst_height = height;
if (width % 2 != 0)
dst_width = width & ~1;
if (height % 2 != 0)
dst_height = height & ~1;
VideoEncoderParams params{dst_width,
dst_height,
static_cast<uint8_t>(Server::frameRate),
static_cast<uint8_t>(Server::groupOfPicture),
static_cast<uint8_t>(Server::videoQualityCRFCQP)};
if (current_params != params) {
bpp = pb->getPF().bpp >> 3;
if (!init(width, height, params)) {
vlog.error("Failed to initialize encoder");
return false;
}
frame = frame_guard.get();
} else {
frame->pict_type = AV_PICTURE_TYPE_NONE;
}
const uint8_t *src_data[1] = {buffer};
const int src_line_size[1] = {stride * bpp}; // RGB has bpp bytes per pixel
if (ffmpeg.sws_scale(sws_guard.get(), src_data, src_line_size, 0, height, frame->data, frame->linesize) < 0) {
vlog.error("Error while scaling image");
return false;
}
frame->pts = pts++;
int err = ffmpeg.avcodec_send_frame(ctx_guard.get(), frame);
if (err < 0) {
vlog.error("Error sending frame to codec (%s). Error code: %d", ffmpeg.get_error_description(err).c_str(), err);
return false;
}
auto *pkt = pkt_guard.get();
err = ffmpeg.avcodec_receive_packet(ctx_guard.get(), pkt);
if (err == AVERROR(EAGAIN) || err == AVERROR_EOF) {
// Trying one more time
ffmpeg.avcodec_send_frame(ctx_guard.get(), nullptr); // flush request; the return value was unused
err = ffmpeg.avcodec_receive_packet(ctx_guard.get(), pkt);
}
if (err < 0) {
vlog.error("Error receiving packet from codec");
writeSkipRect();
return false;
}
if (pkt->flags & AV_PKT_FLAG_KEY)
vlog.debug("Key frame %ld", frame->pts);
return true;
}
void SoftwareEncoder::writeRect(const PixelBuffer *pb, const Palette &palette) {
auto *pkt = pkt_guard.get();
auto *os = conn->getOutStream(conn->cp.supportsUdp);
os->writeU8(layout.id);
os->writeU8(msg_codec_id);
os->writeU8(pkt->flags & AV_PKT_FLAG_KEY);
encoders::write_compact(os, pkt->size);
os->writeBytes(&pkt->data[0], pkt->size);
vlog.debug("Screen id %d, codec %d, frame size: %d", layout.id, msg_codec_id, pkt->size);
ffmpeg.av_packet_unref(pkt);
}
void SoftwareEncoder::writeSolidRect(int width, int height, const PixelFormat &pf, const rdr::U8 *colour) {}
void SoftwareEncoder::writeSkipRect() {
auto *os = conn->getOutStream(conn->cp.supportsUdp);
os->writeU8(layout.id);
os->writeU8(kasmVideoSkip);
}
bool SoftwareEncoder::init(int width, int height, VideoEncoderParams params) {
current_params = params;
vlog.debug("FRAME RESIZE (%d, %d): RATE: %d, GOP: %d, QUALITY: %d", width, height, current_params.frame_rate, current_params.group_of_picture, current_params.quality);
auto *ctx = ffmpeg.avcodec_alloc_context3(codec);
if (!ctx) {
vlog.error("Cannot allocate AVCodecContext");
return false;
}
ctx_guard.reset(ctx);
ctx->time_base = {1, params.frame_rate};
ctx->framerate = {params.frame_rate, 1};
ctx->gop_size = params.group_of_picture; // interval between I-frames
ctx->width = current_params.width;
ctx->height = current_params.height;
ctx->coded_width = current_params.width;
ctx->coded_height = current_params.height;
// best
// ctx->pix_fmt = AV_PIX_FMT_YUV444P; // AV_PIX_FMT_YUV420P;
ctx->pix_fmt = AV_PIX_FMT_YUV420P;
ctx->max_b_frames = 0; // No B-frames for immediate output
// HIGH
// if (ffmpeg.av_opt_set(ctx->priv_data, "tune", "zerolatency,stillimage", 0) != 0)
// return false;
//
// // start here, lower (20-22) = better quality,
// // higher (24-28) = lower bitrate
// if (ffmpeg.av_opt_set(ctx->priv_data, "crf", "18", 0) != 0)
// return false;
//
// // Preset: speed vs. compression efficiency
// if (ffmpeg.av_opt_set(ctx->priv_data, "preset", "medium", 0) != 0)
// return false;
if (ffmpeg.av_opt_set(ctx->priv_data, "tune", "zerolatency", 0) < 0) {
vlog.info("Cannot set tune to zerolatency");
}
if (ffmpeg.av_opt_set(ctx->priv_data, "preset", "ultrafast", 0) < 0) {
vlog.info("Cannot set preset to ultrafast");
}
if (encoder == KasmVideoEncoders::Encoder::av1_software) {
if (ffmpeg.av_opt_set(ctx->priv_data, "preset", "12", 0) < 0) {
vlog.info("Cannot set preset to 12");
}
if (ffmpeg.av_opt_set(ctx->priv_data, "svtav1-params", "rtc=1", 0) < 0) {
vlog.info("Cannot set svtav1-params to rtc=1");
}
}
// start here, lower (20-22) = better quality,
// higher (24-28) = lower bitrate
if (ffmpeg.av_opt_set_int(ctx->priv_data, "crf", current_params.quality, 0) < 0) {
vlog.info("Cannot set crf to %d", current_params.quality);
}
// // Preset: speed vs. compression efficiency
// if (ffmpeg.av_opt_set(ctx->priv_data, "preset", "medium", 0) != 0)
// return false;
/*if (ffmpeg.av_opt_set(ctx->priv_data, "preset", "ultrafast", 0) != 0)
throw std::runtime_error("Could not set codec setting");*/
// "ultrafast" = lowest latency but bigger bitrate
// "veryfast" = good balance for realtime
// "medium+" = too slow for live
// H.264 profile for better compression
// if (ffmpeg.av_opt_set(ctx->priv_data, "profile", "high", 0) != 0)
// throw std::runtime_error("Could not set codec setting");
auto *sws_ctx = ffmpeg.sws_getContext(width,
height,
AV_PIX_FMT_RGB32,
current_params.width,
current_params.height,
ctx_guard->pix_fmt,
SWS_BILINEAR,
nullptr,
nullptr,
nullptr);
if (!sws_ctx) {
vlog.error("Could not initialize the conversion context");
return false;
}
sws_guard.reset(sws_ctx);
auto *frame = frame_guard.get();
ffmpeg.av_frame_unref(frame);
frame->format = ctx_guard->pix_fmt;
frame->width = current_params.width;
frame->height = current_params.height;
frame->pict_type = AV_PICTURE_TYPE_I;
if (ffmpeg.av_frame_get_buffer(frame, 0) < 0) {
vlog.error("Could not allocate frame data");
return false;
}
if (ffmpeg.avcodec_open2(ctx_guard.get(), codec, nullptr) < 0) {
vlog.error("Failed to open codec");
return false;
}
return true;
}
} // namespace rfb

View File

@ -0,0 +1,56 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#pragma once
#include "KasmVideoConstants.h"
#include "rdr/OutStream.h"
#include "rfb/Encoder.h"
#include "rfb/encoders/VideoEncoder.h"
#include "rfb/ffmpeg.h"
namespace rfb {
class SoftwareEncoder final : public VideoEncoder {
Screen layout;
const FFmpeg &ffmpeg;
const AVCodec *codec{};
FFmpeg::FrameGuard frame_guard;
FFmpeg::PacketGuard pkt_guard;
FFmpeg::ContextGuard ctx_guard;
FFmpeg::SwsContextGuard sws_guard;
KasmVideoEncoders::Encoder encoder;
VideoEncoderParams current_params{};
uint8_t msg_codec_id;
int64_t pts{};
int bpp{};
[[nodiscard]] bool init(int width, int height, VideoEncoderParams params);
template<typename T>
friend class EncoderBuilder;
SoftwareEncoder(Screen layout, const FFmpeg &ffmpeg, SConnection *conn, KasmVideoEncoders::Encoder encoder,
VideoEncoderParams params);
public:
bool isSupported() const override;
void writeRect(const PixelBuffer *pb, const Palette &palette) override;
void writeSolidRect(int width, int height, const PixelFormat &pf, const rdr::U8 *colour) override;
bool render(const PixelBuffer *pb) override;
void writeSkipRect() override;
};
} // namespace rfb

View File

@ -0,0 +1,197 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#pragma once
#include <algorithm>
#include <array>
#include <cstdint>
#include <string_view>
#include <vector>
#include "KasmVideoEncoders.h"
namespace rfb {
struct SupportedVideoEncoders {
enum class Codecs : uint8_t
{
h264,
h264_vaapi,
h264_nvenc,
avc,
avc_vaapi,
avc_nvenc,
h265,
h265_vaapi,
h265_nvenc,
hevc,
hevc_vaapi,
hevc_nvenc,
av1,
av1_vaapi,
av1_nvenc,
auto_detect,
unavailable // Keep this as the last entry - used for compile-time size checks
};
static constexpr auto MappedCodecs = std::to_array<KasmVideoEncoders::Encoder>({KasmVideoEncoders::Encoder::h264_software,
KasmVideoEncoders::Encoder::h264_ffmpeg_vaapi,
KasmVideoEncoders::Encoder::h264_nvenc,
KasmVideoEncoders::Encoder::h264_software,
KasmVideoEncoders::Encoder::h264_ffmpeg_vaapi,
KasmVideoEncoders::Encoder::h264_nvenc,
KasmVideoEncoders::Encoder::h265_software,
KasmVideoEncoders::Encoder::h265_ffmpeg_vaapi,
KasmVideoEncoders::Encoder::h265_nvenc,
KasmVideoEncoders::Encoder::h265_software,
KasmVideoEncoders::Encoder::h265_ffmpeg_vaapi,
KasmVideoEncoders::Encoder::h265_nvenc,
KasmVideoEncoders::Encoder::av1_software,
KasmVideoEncoders::Encoder::av1_ffmpeg_vaapi,
KasmVideoEncoders::Encoder::av1_nvenc,
KasmVideoEncoders::Encoder::h264_software,
KasmVideoEncoders::Encoder::unavailable});
static_assert(
MappedCodecs.size() == static_cast<size_t>(Codecs::unavailable) + 1, "MappedCodecs array size must match Codecs enum count");
static inline auto CodecNames = std::to_array<std::string_view>({"h264",
"h264_vaapi",
"h264_nvenc",
"avc",
"avc_vaapi",
"avc_nvenc",
"h265",
"h265_vaapi",
"h265_nvenc",
"hevc",
"hevc_vaapi",
"hevc_nvenc",
"av1",
"av1_vaapi",
"av1_nvenc",
"auto"});
static_assert(CodecNames.size() == static_cast<size_t>(Codecs::unavailable), "CodecNames array size must match Codecs enum count (excluding unavailable)");
static std::string_view to_string(Codecs codec) {
return CodecNames[static_cast<uint8_t>(codec)];
}
static bool is_supported(std::string_view codec) {
if (codec.empty())
return false;
for (const auto supported_codec: CodecNames)
if (supported_codec == codec)
return true;
return false;
}
static auto get_codec(std::string_view codec) {
for (auto codec_impl: enum_range(Codecs::h264, Codecs::auto_detect)) {
if (to_string(codec_impl) == codec)
return codec_impl;
}
return Codecs::unavailable;
}
static constexpr auto map_encoder(Codecs impl) {
return MappedCodecs[static_cast<uint8_t>(impl)];
}
static std::vector<std::string_view> parse(const std::string_view codecs) {
std::vector<std::string_view> result;
if (codecs.empty())
return {};
size_t pos{};
size_t start{};
while (pos < codecs.size()) {
pos = codecs.find_first_of(',', pos);
if (pos == std::string_view::npos)
pos = codecs.size();
result.push_back(codecs.substr(start, pos - start));
start = ++pos;
}
return result;
}
static KasmVideoEncoders::Encoders map_encoders(const std::vector<std::string_view> &codecs) {
KasmVideoEncoders::Encoders result;
if (codecs.empty())
return {};
for (const auto codec_name: codecs) {
const auto codec = get_codec(codec_name);
switch (codec) {
case Codecs::auto_detect:
if (!result.empty())
result.clear();
result.push_back(map_encoder(Codecs::av1_nvenc));
result.push_back(map_encoder(Codecs::av1_vaapi));
result.push_back(map_encoder(Codecs::av1));
result.push_back(map_encoder(Codecs::h265_nvenc));
result.push_back(map_encoder(Codecs::h265_vaapi));
result.push_back(map_encoder(Codecs::h265));
result.push_back(map_encoder(Codecs::h264_nvenc));
result.push_back(map_encoder(Codecs::h264_vaapi));
result.push_back(map_encoder(Codecs::h264));
return result;
default:
{
const auto encoder = map_encoder(codec);
if (std::find(result.begin(), result.end(), encoder) == result.end())
result.push_back(encoder);
}
}
}
return result;
}
static KasmVideoEncoders::Encoders filter_available_encoders(
const KasmVideoEncoders::Encoders &encoders, const KasmVideoEncoders::Encoders &available) {
KasmVideoEncoders::Encoders result;
for (auto encoder: available) {
if (std::ranges::find(encoders.begin(), encoders.end(), encoder) != encoders.end())
result.push_back(encoder);
}
return result;
}
};
} // namespace rfb
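
Putting the helpers above together, a hedged sketch of how a codec option string could become a prioritized encoder list (the option string is an assumption):

// Explicit names are mapped and deduplicated in the order given; "auto" would expand to the full preference order instead.
const auto names = rfb::SupportedVideoEncoders::parse("h264_vaapi,h264");
const auto wanted = rfb::SupportedVideoEncoders::map_encoders(names);
// wanted == { h264_ffmpeg_vaapi, h264_software }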

View File

@ -0,0 +1,69 @@
#pragma once
#include <unistd.h>
#include <va/va.h>
#include "rdr/OutStream.h"
#include "rfb/Encoder.h"
#include "rfb/encoders/VideoEncoder.h"
struct fd_handle {
int fd;
fd_handle(int descriptor = -1) : fd(descriptor) {}
~fd_handle() {
if (fd >= 0)
::close(fd);
}
fd_handle(const fd_handle &) = delete;
fd_handle &operator=(const fd_handle &) = delete;
fd_handle(fd_handle &&other) noexcept : fd(other.fd) {
other.fd = -1;
}
fd_handle &operator=(fd_handle &&other) noexcept {
if (this != &other) {
if (fd >= 0)
::close(fd);
fd = other.fd;
other.fd = -1;
}
return *this;
}
operator int() const noexcept {
return fd;
} // implicit conversion to int so the handle can be passed directly to C APIs
};
namespace rfb {
class VAAPIEncoder final : public VideoEncoder {
static inline VASurfaceAttrib surface_attribs[] = {
{VASurfaceAttribPixelFormat, VA_SURFACE_ATTRIB_SETTABLE, {VAGenericValueTypeInteger, {VA_FOURCC_RGBX}}},
{VASurfaceAttribPixelFormat, VA_SURFACE_ATTRIB_SETTABLE, {VAGenericValueTypeInteger, {VA_FOURCC_NV12}}}};
uint8_t frame_rate{};
int bpp{};
fd_handle fd;
VADisplay dpy;
VAConfigID config_id;
VASurfaceID rgb_surface;
VASurfaceID yuv_surface;
static void write_compact(rdr::OutStream *os, int value);
[[nodiscard]] bool init(int width, int height, int dst_width, int dst_height);
public:
VAAPIEncoder(uint32_t id, SConnection *conn, uint8_t frame_rate);
bool isSupported() const override;
void writeRect(const PixelBuffer *pb, const Palette &palette) override;
void writeSolidRect(int width, int height, const PixelFormat &pf, const rdr::U8 *colour) override;
void writeSkipRect() override;
};
} // namespace rfb
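
fd_handle above is a small RAII wrapper around a POSIX descriptor. A usage sketch (the device path and open flags are assumptions, not part of this change):

#include <fcntl.h>
#include <utility>
// Inside some probing function (illustrative):
fd_handle probe_fd{::open("/dev/dri/renderD128", O_RDWR)};
if (probe_fd < 0)
return; // device not accessible
fd_handle owner = std::move(probe_fd); // probe_fd now holds -1; owner closes the descriptor on destruction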

View File

@ -0,0 +1,48 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#pragma once
#include <rfb/PixelBuffer.h>
#include "rfb/Encoder.h"
namespace rfb {
struct VideoEncoderParams {
int width{};
int height{};
uint8_t frame_rate{};
uint8_t group_of_picture{};
uint8_t quality{};
bool operator==(const VideoEncoderParams &rhs) const noexcept {
return width == rhs.width && height == rhs.height && frame_rate == rhs.frame_rate && group_of_picture == rhs.group_of_picture &&
quality == rhs.quality;
}
bool operator!=(const VideoEncoderParams &rhs) const noexcept {
return !(*this == rhs);
}
};
class VideoEncoder : public Encoder {
public:
VideoEncoder(Id id, SConnection *conn) :
Encoder(id, conn, encodingKasmVideo, static_cast<EncoderFlags>(EncoderUseNativePF | EncoderLossy), -1) {}
virtual bool render(const PixelBuffer *pb) = 0;
virtual void writeSkipRect() = 0;
~VideoEncoder() override = default;
};
} // namespace rfb

View File

@ -0,0 +1,159 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include "VideoEncoderFactory.h"
#include <cstdint>
#include "FFMPEGVAAPIEncoder.h"
#include "SoftwareEncoder.h"
#include "VAAPIEncoder.h"
namespace rfb {
class EncoderBuilderBase {
public:
virtual VideoEncoder *build() = 0;
virtual ~EncoderBuilderBase() = default;
};
template<typename T>
struct is_ffmpeg_based {
static constexpr auto value = true;
};
template<>
struct is_ffmpeg_based<VAAPIEncoder> {
static constexpr auto value = false;
};
template<typename T>
class EncoderBuilder : public EncoderBuilderBase {
static constexpr uint32_t INVALID_ID{std::numeric_limits<uint32_t>::max()};
Screen layout;
const FFmpeg *ffmpeg{};
KasmVideoEncoders::Encoder encoder{};
VideoEncoderParams params{};
SConnection *conn{};
const char *dri_node{};
explicit EncoderBuilder(const FFmpeg *ffmpeg_) :
ffmpeg(ffmpeg_) {
layout.id = INVALID_ID;
}
EncoderBuilder() = default;
public:
static EncoderBuilder create(const FFmpeg *ffmpeg) {
return EncoderBuilder{ffmpeg};
}
static EncoderBuilder create() {
return EncoderBuilder{};
}
EncoderBuilder &with_params(VideoEncoderParams value) {
params = value;
return *this;
}
EncoderBuilder &with_encoder(KasmVideoEncoders::Encoder value) {
encoder = value;
return *this;
}
EncoderBuilder &with_connection(SConnection *value) {
conn = value;
return *this;
}
EncoderBuilder &with_id(uint32_t value) {
layout.id = value;
return *this;
}
EncoderBuilder &with_layout(const Screen &layout_) {
layout = layout_;
return *this;
}
EncoderBuilder &with_dri_node(const char *path) {
dri_node = path;
return *this;
}
VideoEncoder *build() override {
if (layout.id == INVALID_ID)
throw std::runtime_error("Encoder does not have a valid id");
if (!conn)
throw std::runtime_error("Connection is required");
if constexpr (is_ffmpeg_based<T>::value) {
if (!ffmpeg)
throw std::runtime_error("FFmpeg is required");
if constexpr (std::is_same_v<T, FFMPEGVAAPIEncoder>) {
return new T(layout, *ffmpeg, conn, encoder, dri_node, params);
} else
return new T(layout, *ffmpeg, conn, encoder, params);
} else {
return new T(conn, encoder, params);
}
}
};
using FFMPEGVAAPIEncoderBuilder = EncoderBuilder<FFMPEGVAAPIEncoder>;
using VAAPIEncoderBuilder = EncoderBuilder<VAAPIEncoder>;
using SoftwareEncoderBuilder = EncoderBuilder<SoftwareEncoder>;
VideoEncoder *create_encoder(const Screen &layout, const FFmpeg *ffmpeg, SConnection *conn, KasmVideoEncoders::Encoder video_encoder,
const char *dri_node, VideoEncoderParams params) {
switch (video_encoder) {
case KasmVideoEncoders::Encoder::h264_vaapi:
case KasmVideoEncoders::Encoder::h265_vaapi:
case KasmVideoEncoders::Encoder::av1_vaapi:
// return
// H264VAAPIEncoderBuilder::create().with_connection(conn).with_frame_rate(frame_rate).with_bit_rate(bit_rate).build();
case KasmVideoEncoders::Encoder::h264_ffmpeg_vaapi:
case KasmVideoEncoders::Encoder::h265_ffmpeg_vaapi:
case KasmVideoEncoders::Encoder::av1_ffmpeg_vaapi:
return FFMPEGVAAPIEncoderBuilder::create(ffmpeg)
.with_layout(layout)
.with_connection(conn)
.with_encoder(video_encoder)
.with_params(params)
.with_dri_node(dri_node)
.build();
case KasmVideoEncoders::Encoder::h264_nvenc:
case KasmVideoEncoders::Encoder::h265_nvenc:
case KasmVideoEncoders::Encoder::av1_nvenc:
throw std::runtime_error("NVENC is not supported yet");
default:
return SoftwareEncoderBuilder::create(ffmpeg)
.with_layout(layout)
.with_connection(conn)
.with_encoder(video_encoder)
.with_params(params)
.build();
}
}
} // namespace rfb
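
A hypothetical call site for the factory above; only the create_encoder() signature and the type/enum names are taken from the diff, while make_h264(), the DRI node path, and the surrounding objects are assumptions:

```cpp
// Sketch only: obtaining an FFmpeg/VAAPI H.264 encoder for one screen.
#include "VideoEncoderFactory.h"

namespace rfb {
    VideoEncoder *make_h264(const Screen &layout, SConnection *conn,
                            const VideoEncoderParams &params) {
        // FFmpeg::get() is the dlopen() wrapper singleton from rfb/ffmpeg.h;
        // the render node path below is assumed for the example.
        return create_encoder(layout, &FFmpeg::get(), conn,
                              KasmVideoEncoders::Encoder::h264_ffmpeg_vaapi,
                              "/dev/dri/renderD128", params);
    }
} // namespace rfb
```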

View File

@ -0,0 +1,28 @@
/* Copyright (C) 2025 Kasm. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#pragma once
#include "KasmVideoConstants.h"
#include "VideoEncoder.h"
#include "rfb/Encoder.h"
#include "rfb/ffmpeg.h"
namespace rfb {
VideoEncoder *create_encoder(const Screen &layout, const FFmpeg *ffmpeg, SConnection *conn, KasmVideoEncoders::Encoder video_encoder, const char *dri_node, VideoEncoderParams params);
} // namespace rfb

View File

@ -0,0 +1,19 @@
#include "utils.h"
namespace rfb::encoders {
void write_compact(rdr::OutStream *os, int value) {
auto b = value & 0x7F;
if (value <= 0x7F) {
os->writeU8(b);
} else {
os->writeU8(b | 0x80);
b = value >> 7 & 0x7F;
if (value <= 0x3FFF) {
os->writeU8(b);
} else {
os->writeU8(b | 0x80);
os->writeU8(value >> 14 & 0xFF);
}
}
}
} // namespace rfb::encoders
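
For context, write_compact() sends the value low 7 bits first, with the top bit of each byte flagging that another byte follows. A decoder-side counterpart might look like the sketch below; rdr::InStream::readU8() is assumed as the mirror of writeU8():

```cpp
// Hypothetical reader for the compact length format above; illustration only.
#include "rdr/InStream.h"

namespace rfb::encoders {
    int read_compact(rdr::InStream *is) {
        int value = is->readU8();
        if (value & 0x80) {
            value &= 0x7F;
            const int b = is->readU8();
            value |= (b & 0x7F) << 7;
            if (b & 0x80)
                value |= is->readU8() << 14;  // third byte carries bits 14..21
        }
        return value;
    }
} // namespace rfb::encoders
```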

View File

@ -0,0 +1,7 @@
#pragma once
#include "rdr/OutStream.h"
namespace rfb::encoders {
void write_compact(rdr::OutStream *os, int value);
} // namespace rfb::encoders

View File

@ -28,6 +28,7 @@ int rfb::encodingNum(const char* name)
if (strcasecmp(name, "hextile") == 0) return encodingHextile;
if (strcasecmp(name, "ZRLE") == 0) return encodingZRLE;
if (strcasecmp(name, "Tight") == 0) return encodingTight;
if (strcasecmp(name, "KasmVideo") == 0) return encodingKasmVideo;
return -1;
}
@ -41,6 +42,7 @@ const char* rfb::encodingName(int num)
case encodingHextile: return "hextile";
case encodingZRLE: return "ZRLE";
case encodingTight: return "Tight";
case encodingKasmVideo: return "KasmVideo";
default: return "[unknown encoding]";
}
}

View File

@ -21,79 +21,109 @@
namespace rfb {
const int encodingRaw = 0;
const int encodingCopyRect = 1;
const int encodingRRE = 2;
const int encodingCoRRE = 4;
const int encodingHextile = 5;
const int encodingTight = 7;
const int encodingUdp = 8;
const int encodingZRLE = 16;
constexpr int encodingRaw = 0;
constexpr int encodingCopyRect = 1;
constexpr int encodingRRE = 2;
constexpr int encodingCoRRE = 4;
constexpr int encodingHextile = 5;
constexpr int encodingTight = 7;
constexpr int encodingUdp = 8;
constexpr int encodingZRLE = 16;
constexpr int encodingKasmVideo = 17;
const int encodingMax = 255;
constexpr int encodingMax = 255;
const int pseudoEncodingXCursor = -240;
const int pseudoEncodingCursor = -239;
const int pseudoEncodingDesktopSize = -223;
const int pseudoEncodingLEDState = -261;
const int pseudoEncodingExtendedDesktopSize = -308;
const int pseudoEncodingDesktopName = -307;
const int pseudoEncodingFence = -312;
const int pseudoEncodingContinuousUpdates = -313;
const int pseudoEncodingCursorWithAlpha = -314;
const int pseudoEncodingQEMUKeyEvent = -258;
constexpr int pseudoEncodingXCursor = -240;
constexpr int pseudoEncodingCursor = -239;
constexpr int pseudoEncodingDesktopSize = -223;
constexpr int pseudoEncodingLEDState = -261;
constexpr int pseudoEncodingExtendedDesktopSize = -308;
constexpr int pseudoEncodingDesktopName = -307;
constexpr int pseudoEncodingFence = -312;
constexpr int pseudoEncodingContinuousUpdates = -313;
constexpr int pseudoEncodingCursorWithAlpha = -314;
constexpr int pseudoEncodingQEMUKeyEvent = -258;
// TightVNC-specific
const int pseudoEncodingLastRect = -224;
const int pseudoEncodingQualityLevel0 = -32;
const int pseudoEncodingQualityLevel9 = -23;
const int pseudoEncodingCompressLevel0 = -256;
const int pseudoEncodingCompressLevel9 = -247;
constexpr int pseudoEncodingLastRect = -224;
constexpr int pseudoEncodingQualityLevel0 = -32;
constexpr int pseudoEncodingQualityLevel9 = -23;
constexpr int pseudoEncodingCompressLevel0 = -256;
constexpr int pseudoEncodingCompressLevel9 = -247;
// TurboVNC-specific
const int pseudoEncodingFineQualityLevel0 = -512;
const int pseudoEncodingFineQualityLevel100 = -412;
const int pseudoEncodingSubsamp1X = -768;
const int pseudoEncodingSubsamp4X = -767;
const int pseudoEncodingSubsamp2X = -766;
const int pseudoEncodingSubsampGray = -765;
const int pseudoEncodingSubsamp8X = -764;
const int pseudoEncodingSubsamp16X = -763;
constexpr int pseudoEncodingFineQualityLevel0 = -512;
constexpr int pseudoEncodingFineQualityLevel100 = -412;
constexpr int pseudoEncodingSubsamp1X = -768;
constexpr int pseudoEncodingSubsamp4X = -767;
constexpr int pseudoEncodingSubsamp2X = -766;
constexpr int pseudoEncodingSubsampGray = -765;
constexpr int pseudoEncodingSubsamp8X = -764;
constexpr int pseudoEncodingSubsamp16X = -763;
// Kasm-specific
const int pseudoEncodingWEBP = -1024;
const int pseudoEncodingJpegVideoQualityLevel0 = -1023;
const int pseudoEncodingJpegVideoQualityLevel9 = -1014;
const int pseudoEncodingWebpVideoQualityLevel0 = -1013;
const int pseudoEncodingWebpVideoQualityLevel9 = -1004;
const int pseudoEncodingTreatLosslessLevel0 = -1003;
const int pseudoEncodingTreatLosslessLevel10 = -993;
const int pseudoEncodingPreferBandwidth = -992;
const int pseudoEncodingDynamicQualityMinLevel0 = -991;
const int pseudoEncodingDynamicQualityMinLevel9 = -982;
const int pseudoEncodingDynamicQualityMaxLevel0 = -981;
const int pseudoEncodingDynamicQualityMaxLevel9 = -972;
const int pseudoEncodingVideoAreaLevel1 = -971;
const int pseudoEncodingVideoAreaLevel100 = -871;
const int pseudoEncodingVideoTimeLevel0 = -870;
const int pseudoEncodingVideoTimeLevel100 = -770;
constexpr int pseudoEncodingWEBP = -1024;
constexpr int pseudoEncodingJpegVideoQualityLevel0 = -1023;
constexpr int pseudoEncodingJpegVideoQualityLevel9 = -1014;
constexpr int pseudoEncodingWebpVideoQualityLevel0 = -1013;
constexpr int pseudoEncodingWebpVideoQualityLevel9 = -1004;
constexpr int pseudoEncodingTreatLosslessLevel0 = -1003;
constexpr int pseudoEncodingTreatLosslessLevel10 = -993;
constexpr int pseudoEncodingPreferBandwidth = -992;
constexpr int pseudoEncodingDynamicQualityMinLevel0 = -991;
constexpr int pseudoEncodingDynamicQualityMinLevel9 = -982;
constexpr int pseudoEncodingDynamicQualityMaxLevel0 = -981;
constexpr int pseudoEncodingDynamicQualityMaxLevel9 = -972;
constexpr int pseudoEncodingVideoAreaLevel1 = -971;
constexpr int pseudoEncodingVideoAreaLevel100 = -871;
constexpr int pseudoEncodingVideoTimeLevel0 = -870;
constexpr int pseudoEncodingVideoTimeLevel100 = -770;
const int pseudoEncodingFrameRateLevel10 = -2048;
const int pseudoEncodingFrameRateLevel60 = -1998;
const int pseudoEncodingMaxVideoResolution = -1997;
const int pseudoEncodingVideoScalingLevel0 = -1996;
const int pseudoEncodingVideoScalingLevel9 = -1987;
const int pseudoEncodingVideoOutTimeLevel1 = -1986;
const int pseudoEncodingVideoOutTimeLevel100 = -1887;
const int pseudoEncodingQOI = -1886;
const int pseudoEncodingKasmDisconnectNotify = -1885;
constexpr int pseudoEncodingFrameRateLevel10 = -2048;
constexpr int pseudoEncodingFrameRateLevel60 = -1998;
constexpr int pseudoEncodingMaxVideoResolution = -1997;
constexpr int pseudoEncodingVideoScalingLevel0 = -1996;
constexpr int pseudoEncodingVideoScalingLevel9 = -1987;
constexpr int pseudoEncodingVideoOutTimeLevel1 = -1986;
constexpr int pseudoEncodingVideoOutTimeLevel100 = -1887;
constexpr int pseudoEncodingQOI = -1886;
constexpr int pseudoEncodingKasmDisconnectNotify = -1885;
constexpr int pseudoEncodingHardwareProfile0 = -1170;
constexpr int pseudoEncodingHardwareProfile4 = -1166;
constexpr int pseudoEncodingGOP1 = -1165;
constexpr int pseudoEncodingGOP60 = -1105;
constexpr int pseudoEncodingStreamingVideoQualityLevel0 = -1104;
constexpr int pseudoEncodingStreamingVideoQualityLevel63 = -1041;
// AV1
constexpr int pseudoEncodingStreamingModeAV1QSV = -1040;
constexpr int pseudoEncodingStreamingModeAV1NVENC = -1039;
constexpr int pseudoEncodingStreamingModeAV1VAAPI = -1038;
constexpr int pseudoEncodingStreamingModeAV1SW = -1037;
constexpr int pseudoEncodingStreamingModeAV1 = -1036;
// h.265
constexpr int pseudoEncodingStreamingModeHEVCQSV = -1035;
constexpr int pseudoEncodingStreamingModeHEVCNVENC = -1034;
constexpr int pseudoEncodingStreamingModeHEVCVAAPI = -1033;
constexpr int pseudoEncodingStreamingModeHEVCSW = -1032;
constexpr int pseudoEncodingStreamingModeHEVC = -1031;
// h.264
constexpr int pseudoEncodingStreamingModeAVCQSV = -1030;
constexpr int pseudoEncodingStreamingModeAVCNVENC = -1029;
constexpr int pseudoEncodingStreamingModeAVCVAAPI = -1028;
constexpr int pseudoEncodingStreamingModeAVCSW = -1027;
constexpr int pseudoEncodingStreamingModeAVC = -1026;
constexpr int pseudoEncodingStreamingModeJpegWebp = -1025;
// VMware-specific
const int pseudoEncodingVMwareCursor = 0x574d5664;
const int pseudoEncodingVMwareCursorPosition = 0x574d5666;
constexpr int pseudoEncodingVMwareCursor = 0x574d5664;
constexpr int pseudoEncodingVMwareCursorPosition = 0x574d5666;
// UltraVNC-specific
const int pseudoEncodingExtendedClipboard = 0xC0A1E5CE;
constexpr int pseudoEncodingExtendedClipboard = 0xC0A1E5CE;
int encodingNum(const char* name);
const char* encodingName(int num);
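
The ranged pseudo-encodings above follow the usual "base plus offset" convention (the same one the Tight quality levels use). The helpers below illustrate that arithmetic; they are not code from the commit, and only the range endpoints they assert can be checked against this header:

```cpp
// Sketch: mapping a level onto its pseudo-encoding number.
constexpr int qualityLevelEncoding(int level) {        // 0..9
    return rfb::pseudoEncodingQualityLevel0 + level;
}
constexpr int hardwareProfileEncoding(int profile) {   // 0..4
    return rfb::pseudoEncodingHardwareProfile0 + profile;
}
constexpr int streamingQualityEncoding(int q) {        // 0..63
    return rfb::pseudoEncodingStreamingVideoQualityLevel0 + q;
}

static_assert(qualityLevelEncoding(9) == rfb::pseudoEncodingQualityLevel9);
static_assert(hardwareProfileEncoding(4) == rfb::pseudoEncodingHardwareProfile4);
static_assert(streamingQualityEncoding(63) == rfb::pseudoEncodingStreamingVideoQualityLevel63);
```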

View File

@ -21,16 +21,13 @@
#include <rdr/types.h>
namespace rfb {
const rdr::U32 fenceFlagBlockBefore = 1<<0;
const rdr::U32 fenceFlagBlockAfter = 1<<1;
const rdr::U32 fenceFlagSyncNext = 1<<2;
constexpr rdr::U32 fenceFlagBlockBefore = 1<<0;
constexpr rdr::U32 fenceFlagBlockAfter = 1<<1;
constexpr rdr::U32 fenceFlagSyncNext = 1<<2;
const rdr::U32 fenceFlagRequest = 1<<31;
constexpr rdr::U32 fenceFlagRequest = 1<<31;
const rdr::U32 fenceFlagsSupported = (fenceFlagBlockBefore |
fenceFlagBlockAfter |
fenceFlagSyncNext |
fenceFlagRequest);
constexpr rdr::U32 fenceFlagsSupported = fenceFlagBlockBefore | fenceFlagBlockAfter | fenceFlagSyncNext | fenceFlagRequest;
}
#endif

View File

@ -1,5 +1,5 @@
/* Copyright (C) 2025 Kasm Technologies Corp
*
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@ -18,14 +18,14 @@
#include "ffmpeg.h"
#include <array>
#include <string_view>
#include <filesystem>
#include "LogWriter.h"
FFmpegFrameFeeder::FFmpegFrameFeeder() {
static constexpr std::array<std::string_view, 2> paths = {
"/usr/lib/",
"/usr/lib64"
};
static rfb::LogWriter vlog("ffmpeg");
FFmpeg::FFmpeg() {
static constexpr std::array<std::string_view, 2> paths = {"/usr/lib/", "/usr/lib64"};
namespace fs = std::filesystem;
using namespace std::string_literals;
@ -56,155 +56,88 @@ FFmpegFrameFeeder::FFmpegFrameFeeder() {
};
// libavformat
libavformat = load_lib("libavformat.so");
auto handle = libavformat.get();
try {
libavformat = load_lib("libavformat.so");
auto handle = libavformat.get();
avformat_open_input_f = D_LOOKUP_SYM(handle, avformat_open_input);
avformat_find_stream_info_f = D_LOOKUP_SYM(handle, avformat_find_stream_info);
avcodec_find_decoder_f = D_LOOKUP_SYM(handle, avcodec_find_decoder);
avcodec_parameters_to_context_f = D_LOOKUP_SYM(handle, avcodec_parameters_to_context);
av_read_frame_f = D_LOOKUP_SYM(handle, av_read_frame);
av_seek_frame_f = D_LOOKUP_SYM(handle, av_seek_frame);
avformat_close_input_f = D_LOOKUP_SYM(handle, avformat_close_input);
avformat_open_input_f = D_LOOKUP_SYM(handle, avformat_open_input);
avformat_find_stream_info_f = D_LOOKUP_SYM(handle, avformat_find_stream_info);
avcodec_find_decoder_f = D_LOOKUP_SYM(handle, avcodec_find_decoder);
avcodec_parameters_to_context_f = D_LOOKUP_SYM(handle, avcodec_parameters_to_context);
av_read_frame_f = D_LOOKUP_SYM(handle, av_read_frame);
av_seek_frame_f = D_LOOKUP_SYM(handle, av_seek_frame);
avformat_close_input_f = D_LOOKUP_SYM(handle, avformat_close_input);
vlog.info("libavformat.so loaded");
vlog.info("libavformat.so loaded");
// libavutil
libavutil = load_lib("libavutil.so");
handle = libavutil.get();
// libavutil
libavutil = load_lib("libavutil.so");
handle = libavutil.get();
av_frame_free_f = D_LOOKUP_SYM(handle, av_frame_free);
av_frame_alloc_f = D_LOOKUP_SYM(handle, av_frame_alloc);
av_frame_get_buffer_f = D_LOOKUP_SYM(handle, av_frame_get_buffer);
av_frame_free_f = D_LOOKUP_SYM(handle, av_frame_free);
av_frame_alloc_f = D_LOOKUP_SYM(handle, av_frame_alloc);
av_frame_unref_f = D_LOOKUP_SYM(handle, av_frame_unref);
av_frame_get_buffer_f = D_LOOKUP_SYM(handle, av_frame_get_buffer);
av_opt_next_f = D_LOOKUP_SYM(handle, av_opt_next);
av_opt_set_f = D_LOOKUP_SYM(handle, av_opt_set);
av_opt_set_int_f = D_LOOKUP_SYM(handle, av_opt_set_int);
av_buffer_unref_f = D_LOOKUP_SYM(handle, av_buffer_unref);
av_hwdevice_ctx_create_f = D_LOOKUP_SYM(handle, av_hwdevice_ctx_create);
av_hwframe_ctx_alloc_f = D_LOOKUP_SYM(handle, av_hwframe_ctx_alloc);
av_hwframe_ctx_init_f = D_LOOKUP_SYM(handle, av_hwframe_ctx_init);
av_buffer_ref_f = D_LOOKUP_SYM(handle, av_buffer_ref);
av_hwframe_get_buffer_f = D_LOOKUP_SYM(handle, av_hwframe_get_buffer);
av_hwframe_transfer_data_f = D_LOOKUP_SYM(handle, av_hwframe_transfer_data);
av_strerror_f = D_LOOKUP_SYM(handle, av_strerror);
av_log_set_level_f = D_LOOKUP_SYM(handle, av_log_set_level);
av_log_set_callback_f = D_LOOKUP_SYM(handle, av_log_set_callback);
vlog.info("libavutil.so loaded");
vlog.info("libavutil.so loaded");
// libswscale
libswscale = load_lib("libswscale.so");
handle = libswscale.get();
// libswscale
libswscale = load_lib("libswscale.so");
handle = libswscale.get();
sws_freeContext_f = D_LOOKUP_SYM(handle, sws_freeContext);
sws_getContext_f = D_LOOKUP_SYM(handle, sws_getContext);
sws_scale_f = D_LOOKUP_SYM(handle, sws_scale);
sws_freeContext_f = D_LOOKUP_SYM(handle, sws_freeContext);
sws_getContext_f = D_LOOKUP_SYM(handle, sws_getContext);
sws_scale_f = D_LOOKUP_SYM(handle, sws_scale);
// libavcodec
libavcodec = load_lib("libavcodec.so");
handle = libavcodec.get();
// libavcodec
libavcodec = load_lib("libavcodec.so");
handle = libavcodec.get();
avcodec_open2_f = D_LOOKUP_SYM(handle, avcodec_open2);
avcodec_alloc_context3_f = D_LOOKUP_SYM(handle, avcodec_alloc_context3);
avcodec_send_packet_f = D_LOOKUP_SYM(handle, avcodec_send_packet);
avcodec_receive_frame_f = D_LOOKUP_SYM(handle, avcodec_receive_frame);
av_packet_unref_f = D_LOOKUP_SYM(handle, av_packet_unref);
avcodec_flush_buffers_f = D_LOOKUP_SYM(handle, avcodec_flush_buffers);
avcodec_close_f = D_LOOKUP_SYM(handle, avcodec_close);
av_packet_alloc_f = D_LOOKUP_SYM(handle, av_packet_alloc);
av_packet_free_f = D_LOOKUP_SYM(handle, av_packet_free);
}
avcodec_free_context_f = D_LOOKUP_SYM(handle, avcodec_free_context);
avcodec_open2_f = D_LOOKUP_SYM(handle, avcodec_open2);
avcodec_find_encoder_f = D_LOOKUP_SYM(handle, avcodec_find_encoder);
avcodec_find_encoder_by_name_f = D_LOOKUP_SYM(handle, avcodec_find_encoder_by_name);
avcodec_alloc_context3_f = D_LOOKUP_SYM(handle, avcodec_alloc_context3);
avcodec_send_frame_f = D_LOOKUP_SYM(handle, avcodec_send_frame);
avcodec_send_packet_f = D_LOOKUP_SYM(handle, avcodec_send_packet);
avcodec_receive_frame_f = D_LOOKUP_SYM(handle, avcodec_receive_frame);
avcodec_receive_packet_f = D_LOOKUP_SYM(handle, avcodec_receive_packet);
av_packet_unref_f = D_LOOKUP_SYM(handle, av_packet_unref);
avcodec_flush_buffers_f = D_LOOKUP_SYM(handle, avcodec_flush_buffers);
avcodec_close_f = D_LOOKUP_SYM(handle, avcodec_close);
av_codec_is_encoder_f = D_LOOKUP_SYM(handle, av_codec_is_encoder);
av_packet_alloc_f = D_LOOKUP_SYM(handle, av_packet_alloc);
av_packet_free_f = D_LOOKUP_SYM(handle, av_packet_free);
FFmpegFrameFeeder::~FFmpegFrameFeeder() {
avformat_close_input_f(&format_ctx);
avcodec_close_f(codec_ctx);
avcodec_free_context_f(&codec_ctx);
}
av_log_set_level_f(AV_LOG_VERBOSE); // control what is emitted
av_log_set_callback_f(av_log_callback);
void FFmpegFrameFeeder::open(const std::string_view path) {
if (avformat_open_input_f(&format_ctx, path.data(), nullptr, nullptr) < 0)
throw std::runtime_error("Could not open video file");
available = true;
} catch (std::exception &e) {
vlog.error("%s", e.what());
// Find stream info
if (avformat_find_stream_info_f(format_ctx, nullptr) < 0)
throw std::runtime_error("Could not find stream info");
// Find video stream
for (uint32_t i = 0; i < format_ctx->nb_streams; ++i) {
if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_idx = static_cast<int>(i);
break;
}
return;
}
if (video_stream_idx == -1)
throw std::runtime_error("No video stream found");
// Get codec parameters and decoder
const auto *codec_parameters = format_ctx->streams[video_stream_idx]->codecpar;
const auto *codec = avcodec_find_decoder_f(codec_parameters->codec_id);
if (!codec)
throw std::runtime_error("Codec not found");
codec_ctx = avcodec_alloc_context3_f(codec);
if (!codec_ctx || avcodec_parameters_to_context_f(codec_ctx, codec_parameters) < 0)
throw std::runtime_error("Failed to set up codec context");
if (avcodec_open2_f(codec_ctx, codec, nullptr) < 0)
throw std::runtime_error("Could not open codec");
}
FFmpegFrameFeeder::play_stats_t FFmpegFrameFeeder::play(benchmarking::MockTestConnection *connection) const {
// Allocate frame and packet
const FrameGuard frame{av_frame_alloc_f()};
const PacketGuard packet{av_packet_alloc_f()};
void FFmpeg::av_log_callback(void *ptr, int level, const char *fmt, va_list vl) {
if (level > AV_LOG_VERBOSE)
return;
if (!frame || !packet)
throw std::runtime_error("Could not allocate frame or packet");
// Scaling context to convert to RGB24
SwsContext *sws_ctx = sws_getContext_f(
codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24,
SWS_BILINEAR, nullptr, nullptr, nullptr
);
if (!sws_ctx)
throw std::runtime_error("Could not create scaling context");
const std::unique_ptr<SwsContext, void(*)(SwsContext *)> sws_ctx_guard{sws_ctx, sws_freeContext_f};
const FrameGuard rgb_frame{av_frame_alloc_f()};
if (!rgb_frame)
throw std::runtime_error("Could not allocate frame");
rgb_frame->format = AV_PIX_FMT_RGB24;
rgb_frame->width = codec_ctx->width;
rgb_frame->height = codec_ctx->height;
if (av_frame_get_buffer_f(rgb_frame.get(), 0) != 0)
throw std::runtime_error("Could not allocate frame data");
play_stats_t stats{};
const auto total_frame_count = get_total_frame_count();
stats.timings.reserve(total_frame_count > 0 ? total_frame_count : 2048);
while (av_read_frame_f(format_ctx, packet.get()) == 0) {
if (packet->stream_index == video_stream_idx) {
if (avcodec_send_packet_f(codec_ctx, packet.get()) == 0) {
while (avcodec_receive_frame_f(codec_ctx, frame.get()) == 0) {
// Convert to RGB
sws_scale_f(sws_ctx_guard.get(), frame->data, frame->linesize, 0,
frame->height,
rgb_frame->data, rgb_frame->linesize);
connection->framebufferUpdateStart();
connection->setNewFrame(rgb_frame.get());
using namespace std::chrono;
auto now = high_resolution_clock::now();
connection->framebufferUpdateEnd();
const auto duration = duration_cast<milliseconds>(high_resolution_clock::now() - now).count();
//vlog.info("Frame took %lu ms", duration);
stats.total += duration;
stats.timings.push_back(duration);
}
}
}
av_packet_unref_f(packet.get());
}
if (av_seek_frame_f(format_ctx, video_stream_idx, 0, AVSEEK_FLAG_BACKWARD) < 0)
throw std::runtime_error("Could not seek to start of video");
avcodec_flush_buffers_f(codec_ctx);
return stats;
char buffer[1024];
vsnprintf(buffer, sizeof(buffer), fmt, vl);
vlog.debug("[FFmpeg Debug] %s", buffer);
}

View File

@ -1,5 +1,5 @@
/* Copyright (C) 2025 Kasm Technologies Corp
*
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@ -21,7 +21,6 @@
#include <dlfcn.h>
#include <memory>
#include <string>
#include "LogWriter.h"
extern "C" {
#include <libavcodec/avcodec.h>
@ -29,61 +28,115 @@ extern "C" {
#include <libswscale/swscale.h>
}
#include "benchmark.h"
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)
#define CONCAT_STR(a, b) a b
#define D_LOOKUP_SYM(handle, name) \
[](auto handle, auto *sym_name) -> auto { \
auto *sym = reinterpret_cast<name##_func>(dlsym(handle, sym_name)); \
if (!sym) \
throw std::runtime_error("Failed to load symbol "s + sym_name); \
return sym; \
#define D_LOOKUP_SYM(handle, name) \
[](auto handle, auto *sym_name) -> auto { \
auto *sym = reinterpret_cast<name##_func>(dlsym(handle, sym_name)); \
if (!sym) \
throw std::runtime_error("Failed to load symbol "s + sym_name); \
return sym; \
}(handle, STR(name))
#define DEFINE_GUARD(name, type, deleter) \
using name##Guard = std::unique_ptr<type, decltype([](auto *ptr){deleter##_f(&ptr);})>;
//using SwsContextGuard = std::unique_ptr<SwsContext, SwsContextDeleter>;
// using SwsContextGuard = std::unique_ptr<SwsContext, SwsContextDeleter>;
class FFmpeg final {
struct AVFrameDeleter {
void operator()(AVFrame *frame) const {
av_frame_free(&frame);
}
};
struct AVPacketDeleter {
void operator()(AVPacket *pkt) const {
av_packet_free(&pkt);
}
};
struct AVCodecContextDeleter {
void operator()(AVCodecContext *ctx) const {
avcodec_free_context(&ctx);
}
};
struct AVFormatContextDeleter {
void operator()(AVFormatContext *ctx) const {
avformat_close_input(&ctx);
}
};
struct SwsContextDeleter {
void operator()(SwsContext *ctx) const {
sws_freeContext(ctx);
}
};
struct AVBufferRefDeleter {
void operator()(AVBufferRef *buf) const {
av_buffer_unref(&buf);
}
};
#define DEFINE_GUARD(name, type) using name##Guard = std::unique_ptr<type, type##Deleter>;
class FFmpegFrameFeeder final {
// libavformat
using avformat_close_input_func = void(*)(AVFormatContext **);
using avformat_open_input_func = int(*)(AVFormatContext **ps, const char *url, const AVInputFormat *fmt,
AVDictionary **options);
using avformat_close_input_func = void (*)(AVFormatContext **);
using avformat_open_input_func = int (*)(AVFormatContext **ps, const char *url, const AVInputFormat *fmt,
AVDictionary **options);
using avformat_find_stream_info_func = int (*)(AVFormatContext *ic, AVDictionary **options);
using av_read_frame_func = int (*)(AVFormatContext *s, AVPacket *pkt);
using av_seek_frame_func = int (*)(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);
// libavutil
using av_frame_free_func = void (*)(AVFrame **);
using av_frame_alloc_func = AVFrame *(*)();
using av_frame_alloc_func = AVFrame *(*) ();
using av_frame_get_buffer_func = int (*)(AVFrame *frame, int align);
using av_frame_unref_func = void (*)(AVFrame *frame);
using av_opt_next_func = const AVOption *(*) (const void *obj, const AVOption *prev);
using av_opt_set_func = int (*)(void *obj, const char *name, const char *val, int search_flags);
using av_opt_set_int_func = int (*)(void *obj, const char *name, int64_t val, int search_flags);
using av_buffer_unref_func = void (*)(AVBufferRef **);
using av_hwdevice_ctx_create_func = int (*)(AVBufferRef **device_ctx, AVHWDeviceType type, const char *device,
AVDictionary *opts, int flags);
using av_hwframe_ctx_alloc_func = AVBufferRef *(*) (AVBufferRef *device_ctx);
using av_hwframe_ctx_init_func = int (*)(AVBufferRef *ref);
using av_buffer_ref_func = AVBufferRef *(*) (const AVBufferRef *buf);
using av_hwframe_get_buffer_func = int (*)(AVBufferRef *hwframe_ctx, AVFrame *frame, int flags);
using av_hwframe_transfer_data_func = int (*)(AVFrame *dst, const AVFrame *src, int flags);
using av_strerror_func = int (*)(int errnum, char *errbuf, size_t errbuf_size);
using av_log_set_level_func = void (*)(int level);
using av_log_set_callback_func = void (*)(void (*callback)(void *, int, const char *, va_list));
// libswscale
using sws_freeContext_func = void (*)(SwsContext *);
using sws_getContext_func = SwsContext * (*)(int srcW, int srcH, AVPixelFormat srcFormat, int dstW, int dstH,
using sws_getContext_func = SwsContext *(*) (int srcW, int srcH, AVPixelFormat srcFormat, int dstW, int dstH,
AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param);
using sws_scale_func = int(*)(SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY,
int srcSliceH, uint8_t *const dst[], const int dstStride[]);
using sws_scale_func = int (*)(SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY,
int srcSliceH, uint8_t *const dst[], const int dstStride[]);
// libavcodec
using avcodec_free_context_func = void (*)(AVCodecContext **);
using av_packet_free_func = void (*)(AVPacket **);
using avcodec_find_decoder_func = const AVCodec * (*)(AVCodecID id);
using avcodec_alloc_context3_func = AVCodecContext* (*)(const AVCodec *codec);
using avcodec_find_encoder_func = const AVCodec *(*) (AVCodecID id);
using avcodec_find_encoder_by_name_func = const AVCodec *(*) (const char *name);
using avcodec_find_decoder_func = const AVCodec *(*) (AVCodecID id);
using avcodec_alloc_context3_func = AVCodecContext *(*) (const AVCodec *codec);
using avcodec_parameters_to_context_func = int (*)(AVCodecContext *codec, const AVCodecParameters *par);
using avcodec_open2_func = int (*)(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
using av_packet_alloc_func = AVPacket *(*)();
using avcodec_send_packet_func = int(*)(AVCodecContext *avctx, const AVPacket *avpkt);
using avcodec_receive_frame_func = int(*)(AVCodecContext *avctx, AVFrame *frame);
using av_packet_alloc_func = AVPacket *(*) ();
using avcodec_send_frame_func = int (*)(AVCodecContext *avctx, const AVFrame *frame);
using avcodec_send_packet_func = int (*)(AVCodecContext *avctx, const AVPacket *avpkt);
using avcodec_receive_packet_func = int (*)(AVCodecContext *avctx, AVPacket *avpkt);
using avcodec_receive_frame_func = int (*)(AVCodecContext *avctx, AVFrame *frame);
using av_packet_unref_func = void (*)(AVPacket *pkt);
using avcodec_flush_buffers_func = void (*)(AVCodecContext *avctx);
using avcodec_close_func = int (*)(AVCodecContext *avctx);
using av_codec_is_encoder_func = int (*)(const AVCodec *codec);
struct DlHandler {
void operator()(void *handle) const {
@ -94,7 +147,7 @@ class FFmpegFrameFeeder final {
using DlHandlerGuard = std::unique_ptr<void, DlHandler>;
// libavformat
avformat_close_input_func avformat_close_input_f{};
static inline avformat_close_input_func avformat_close_input_f{};
avformat_open_input_func avformat_open_input_f{};
avformat_find_stream_info_func avformat_find_stream_info_f{};
av_read_frame_func av_read_frame_f{};
@ -104,65 +157,242 @@ class FFmpegFrameFeeder final {
static inline av_frame_free_func av_frame_free_f{};
av_frame_alloc_func av_frame_alloc_f{};
av_frame_get_buffer_func av_frame_get_buffer_f{};
av_frame_unref_func av_frame_unref_f{};
av_opt_next_func av_opt_next_f{};
av_opt_set_func av_opt_set_f{};
av_opt_set_int_func av_opt_set_int_f{};
static inline av_buffer_unref_func av_buffer_unref_f{};
av_hwdevice_ctx_create_func av_hwdevice_ctx_create_f{};
av_hwframe_ctx_alloc_func av_hwframe_ctx_alloc_f{};
av_hwframe_ctx_init_func av_hwframe_ctx_init_f{};
av_buffer_ref_func av_buffer_ref_f{};
av_hwframe_get_buffer_func av_hwframe_get_buffer_f{};
av_hwframe_transfer_data_func av_hwframe_transfer_data_f{};
av_strerror_func av_strerror_f{};
av_log_set_level_func av_log_set_level_f{};
av_log_set_callback_func av_log_set_callback_f{};
// libswscale
sws_freeContext_func sws_freeContext_f{};
static inline sws_freeContext_func sws_freeContext_f{};
sws_getContext_func sws_getContext_f{};
sws_scale_func sws_scale_f{};
// libavcodec
avcodec_free_context_func avcodec_free_context_f{};
static inline avcodec_free_context_func avcodec_free_context_f{};
static inline av_packet_free_func av_packet_free_f{};
avcodec_find_encoder_func avcodec_find_encoder_f{};
avcodec_find_encoder_by_name_func avcodec_find_encoder_by_name_f{};
avcodec_find_decoder_func avcodec_find_decoder_f{};
avcodec_alloc_context3_func avcodec_alloc_context3_f{};
avcodec_parameters_to_context_func avcodec_parameters_to_context_f{};
avcodec_open2_func avcodec_open2_f{};
av_packet_alloc_func av_packet_alloc_f{};
avcodec_send_frame_func avcodec_send_frame_f{};
avcodec_send_packet_func avcodec_send_packet_f{};
avcodec_receive_frame_func avcodec_receive_frame_f{};
avcodec_receive_packet_func avcodec_receive_packet_f{};
av_packet_unref_func av_packet_unref_f{};
avcodec_flush_buffers_func avcodec_flush_buffers_f{};
avcodec_close_func avcodec_close_f{};
rfb::LogWriter vlog{"FFmpeg"};
DEFINE_GUARD(Frame, AVFrame, av_frame_free)
DEFINE_GUARD(Packet, AVPacket, av_packet_free)
AVFormatContext *format_ctx{};
AVCodecContext *codec_ctx{};
int video_stream_idx{-1};
av_codec_is_encoder_func av_codec_is_encoder_f{};
DlHandlerGuard libavformat{};
DlHandlerGuard libavutil{};
DlHandlerGuard libswscale{};
DlHandlerGuard libavcodec{};
FFmpeg();
~FFmpeg() = default;
static void av_log_callback(void *ptr, int level, const char *fmt, va_list vl);
bool available{};
public:
FFmpegFrameFeeder();
~FFmpegFrameFeeder();
void open(std::string_view path);
[[nodiscard]] int64_t get_total_frame_count() const {
return format_ctx->streams[video_stream_idx]->nb_frames;
[[nodiscard]] static FFmpeg &get() {
static FFmpeg instance;
return instance;
}
struct frame_dimensions_t {
int width{};
int height{};
};
[[nodiscard]] frame_dimensions_t get_frame_dimensions() const {
return {codec_ctx->width, codec_ctx->height};
[[nodiscard]] bool is_available() const {
return available;
}
struct play_stats_t {
uint64_t frames{};
uint64_t total{};
std::vector<uint64_t> timings;
static void avformat_close_input(AVFormatContext **s) {
avformat_close_input_f(s);
}
[[nodiscard]] int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt,
AVDictionary **options) const {
return avformat_open_input_f(ps, url, fmt, options);
}
[[nodiscard]] int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) const {
return avformat_find_stream_info_f(ic, options);
}
[[nodiscard]] int av_read_frame(AVFormatContext *s, AVPacket *pkt) const {
return av_read_frame_f(s, pkt);
}
[[nodiscard]] int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) const {
return av_seek_frame_f(s, stream_index, timestamp, flags);
}
// libavutil
static void av_frame_free(AVFrame **frame) {
av_frame_free_f(frame);
}
[[nodiscard]] AVFrame *av_frame_alloc() const {
return av_frame_alloc_f();
}
[[nodiscard]] int av_frame_get_buffer(AVFrame *frame, int align) const {
return av_frame_get_buffer_f(frame, align);
}
void av_frame_unref(AVFrame *frame) const {
av_frame_unref_f(frame);
}
[[nodiscard]] const AVOption *av_opt_next(const void *obj, const AVOption *prev) {
return av_opt_next_f(obj, prev);
}
[[nodiscard]] int av_opt_set(void *obj, const char *name, const char *val, int search_flags) const {
return av_opt_set_f(obj, name, val, search_flags);
}
[[nodiscard]] int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags) const {
return av_opt_set_int_f(obj, name, val, search_flags);
}
static void av_buffer_unref(AVBufferRef **buf) {
av_buffer_unref_f(buf);
}
[[nodiscard]] int av_hwdevice_ctx_create(AVBufferRef **device_ctx, AVHWDeviceType type, const char *device,
AVDictionary *opts, int flags) const {
return av_hwdevice_ctx_create_f(device_ctx, type, device, opts, flags);
}
[[nodiscard]] AVBufferRef *av_hwframe_ctx_alloc(AVBufferRef *device_ctx) const {
return av_hwframe_ctx_alloc_f(device_ctx);
}
[[nodiscard]] int av_hwframe_ctx_init(AVBufferRef *ref) const {
return av_hwframe_ctx_init_f(ref);
}
[[nodiscard]] AVBufferRef *av_buffer_ref(const AVBufferRef *buf) const {
return av_buffer_ref_f(buf);
}
[[nodiscard]] int av_hwframe_get_buffer(AVBufferRef *hwframe_ctx, AVFrame *frame, int flags) const {
return av_hwframe_get_buffer_f(hwframe_ctx, frame, flags);
}
[[nodiscard]] int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags) const {
return av_hwframe_transfer_data_f(dst, src, flags);
}
// libswscale
static void sws_freeContext(SwsContext *sws_context) {
sws_freeContext_f(sws_context);
}
[[nodiscard]] SwsContext *sws_getContext(int srcW, int srcH, AVPixelFormat srcFormat, int dstW, int dstH,
AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param) const {
return sws_getContext_f(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, srcFilter, dstFilter, param);
}
[[nodiscard]] int sws_scale(SwsContext *c, const uint8_t *const src_slice[], const int src_stride[],
int src_slice_y, int src_slice_h, uint8_t *const dst[], const int dst_stride[]) const {
return sws_scale_f(c, src_slice, src_stride, src_slice_y, src_slice_h, dst, dst_stride);
};
play_stats_t play(benchmarking::MockTestConnection *connection) const;
// libavcodec
[[nodiscard]] const AVCodec *avcodec_find_encoder(AVCodecID id) const {
return avcodec_find_encoder_f(id);
}
[[nodiscard]] const AVCodec *avcodec_find_decoder(AVCodecID id) const {
return avcodec_find_decoder_f(id);
}
[[nodiscard]] int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par) const {
return avcodec_parameters_to_context_f(codec, par);
}
static void avcodec_free_context(AVCodecContext **avctx) {
avcodec_free_context_f(avctx);
}
static void av_packet_free(AVPacket **pkt) {
av_packet_free_f(pkt);
}
[[nodiscard]] AVCodecContext *avcodec_alloc_context3(const AVCodec *codec) const {
return avcodec_alloc_context3_f(codec);
}
[[nodiscard]] int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) const {
return avcodec_open2_f(avctx, codec, options);
}
[[nodiscard]] const AVCodec *avcodec_find_encoder_by_name(const char *name) const {
return avcodec_find_encoder_by_name_f(name);
}
[[nodiscard]] AVPacket *av_packet_alloc() const {
return av_packet_alloc_f();
}
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame) const {
return avcodec_send_frame_f(avctx, frame);
}
[[nodiscard]] int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt) const {
return avcodec_send_packet_f(avctx, avpkt);
}
[[nodiscard]] int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame) const {
return avcodec_receive_frame_f(avctx, frame);
}
[[nodiscard]] int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) const {
return avcodec_receive_packet_f(avctx, avpkt);
}
void av_packet_unref(AVPacket *pkt) const {
av_packet_unref_f(pkt);
}
void avcodec_flush_buffers(AVCodecContext *avctx) const {
avcodec_flush_buffers_f(avctx);
}
int avcodec_close(AVCodecContext *avctx) const {
return avcodec_close_f(avctx);
}
int av_codec_is_encoder(const AVCodec *codec) const {
return av_codec_is_encoder_f(codec);
}
DEFINE_GUARD(Frame, AVFrame)
DEFINE_GUARD(Packet, AVPacket)
DEFINE_GUARD(Context, AVCodecContext)
DEFINE_GUARD(FormatCtx, AVFormatContext)
DEFINE_GUARD(SwsContext, SwsContext)
DEFINE_GUARD(Buffer, AVBufferRef);
[[nodiscard]] std::string get_error_description(int err) const {
char errbuf[AV_ERROR_MAX_STRING_SIZE]{};
av_strerror_f(err, errbuf, AV_ERROR_MAX_STRING_SIZE);
return {errbuf};
}
};
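
A hypothetical usage sketch for the wrapper above: every libav* call goes through the FFmpeg::get() singleton so a missing library degrades gracefully, and the *Guard aliases release codec objects automatically. encode_one_frame() and its error handling are assumptions; the member functions it calls are the ones declared above:

```cpp
// Illustration only, not part of the commit.
#include "rfb/ffmpeg.h"

void encode_one_frame(AVCodecContext *ctx, const AVFrame *src) {
    const FFmpeg &ff = FFmpeg::get();
    if (!ff.is_available())
        return;                              // libav* failed to load at startup

    const FFmpeg::PacketGuard pkt{ff.av_packet_alloc()};
    if (!pkt || ff.avcodec_send_frame(ctx, src) < 0)
        return;

    while (ff.avcodec_receive_packet(ctx, pkt.get()) == 0) {
        // pkt->data / pkt->size would be written to the RFB stream here.
        ff.av_packet_unref(pkt.get());
    }
}
```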

View File

@ -21,57 +21,58 @@
namespace rfb {
// server to client
const int msgTypeFramebufferUpdate = 0;
const int msgTypeSetColourMapEntries = 1;
const int msgTypeBell = 2;
const int msgTypeServerCutText = 3;
constexpr int msgTypeFramebufferUpdate = 0;
constexpr int msgTypeSetColourMapEntries = 1;
constexpr int msgTypeBell = 2;
constexpr int msgTypeServerCutText = 3;
const int msgTypeEndOfContinuousUpdates = 150;
constexpr int msgTypeEndOfContinuousUpdates = 150;
// kasm
const int msgTypeStats = 178;
const int msgTypeRequestFrameStats = 179;
const int msgTypeBinaryClipboard = 180;
const int msgTypeUpgradeToUdp = 181;
const int msgTypeSubscribeUnixRelay = 182;
const int msgTypeUnixRelay = 183;
const int msgTypeKeepAlive = 184;
const int msgTypeServerDisconnect = 185;
const int msgTypeServerFence = 248;
const int msgTypeUserAddedToSession = 253;
const int msgTypeUserRemovedFromSession = 254;
constexpr int msgTypeStats = 178;
constexpr int msgTypeRequestFrameStats = 179;
constexpr int msgTypeBinaryClipboard = 180;
constexpr int msgTypeUpgradeToUdp = 181;
constexpr int msgTypeSubscribeUnixRelay = 182;
constexpr int msgTypeUnixRelay = 183;
constexpr int msgTypeVideoEncoders = 184;
constexpr int msgTypeKeepAlive = 185;
constexpr int msgTypeServerDisconnect = 186;
constexpr int msgTypeServerFence = 248;
constexpr int msgTypeUserAddedToSession = 253;
constexpr int msgTypeUserRemovedFromSession = 254;
// client to server
const int msgTypeSetPixelFormat = 0;
const int msgTypeFixColourMapEntries = 1;
const int msgTypeSetEncodings = 2;
const int msgTypeFramebufferUpdateRequest = 3;
const int msgTypeKeyEvent = 4;
const int msgTypePointerEvent = 5;
const int msgTypeClientCutText = 6;
constexpr int msgTypeSetPixelFormat = 0;
constexpr int msgTypeFixColourMapEntries = 1;
constexpr int msgTypeSetEncodings = 2;
constexpr int msgTypeFramebufferUpdateRequest = 3;
constexpr int msgTypeKeyEvent = 4;
constexpr int msgTypePointerEvent = 5;
constexpr int msgTypeClientCutText = 6;
const int msgTypeEnableContinuousUpdates = 150;
constexpr int msgTypeEnableContinuousUpdates = 150;
// kasm
const int msgTypeRequestStats = 178;
const int msgTypeFrameStats = 179;
constexpr int msgTypeRequestStats = 178;
constexpr int msgTypeFrameStats = 179;
// same as the other direction
//const int msgTypeBinaryClipboard = 180;
//const int msgTypeUpgradeToUdp = 181;
//const int msgTypeSubscribeUnixRelay = 182;
//const int msgTypeUnixRelay = 183;
//const int msgTypeKeepAlive = 184;
//const int msgTypeServerDisconnect = 185;
//constexpr int msgTypeBinaryClipboard = 180;
//constexpr int msgTypeUpgradeToUdp = 181;
//constexpr int msgTypeSubscribeUnixRelay = 182;
//constexpr int msgTypeUnixRelay = 183;
//constexpr int msgTypeVideoEncoders = 184;
//constexpr int msgTypeKeepAlive = 185;
//constexpr int msgTypeServerDisconnect = 186;
const int msgTypeClientFence = 248;
constexpr int msgTypeClientFence = 248;
const int msgTypeSetDesktopSize = 251;
constexpr int msgTypeSetDesktopSize = 251;
const int msgTypeSetMaxVideoResolution = 252;
constexpr int msgTypeSetMaxVideoResolution = 252;
const int msgTypeQEMUClientMessage = 255;
constexpr int msgTypeQEMUClientMessage = 255;
}
#endif

3
debian/control vendored
View File

@ -14,7 +14,8 @@ Architecture: amd64 arm64
Depends: ${shlibs:Depends}, ${misc:Depends}, ${perl:Depends}, ssl-cert, xauth,
x11-xkb-utils, xkb-data, procps, libswitch-perl, libyaml-tiny-perl,
libhash-merge-simple-perl, libscalar-list-utils-perl, liblist-moreutils-perl,
libtry-tiny-perl, libdatetime-perl, libdatetime-timezone-perl, libgbm1
libtry-tiny-perl, libdatetime-perl, libdatetime-timezone-perl, libgbm1,
Recommends: intel-media-va-driver-non-free [amd64]
Suggests: systemd
Provides: vnc-server
Description: KasmVNC provides remote web-based access to a Desktop or application.

8
dev_session_start.sh Executable file
View File

@ -0,0 +1,8 @@
#!/usr/bin/env bash
unset SESSION_MANAGER
unset DBUS_SESSION_BUS_ADDRESS
while ( true ); do
/usr/bin/xfce4-session --display :0
done;

@ -1 +1 @@
Subproject commit 4166ae84781910977605ebda9c03de105839367b
Subproject commit 2d99f224265d16ee8d18c60f47712b065b507729

Some files were not shown because too many files have changed in this diff.