id
int64 0
755k
| file_name
stringlengths 3
109
| file_path
stringlengths 13
185
| content
stringlengths 31
9.38M
| size
int64 31
9.38M
| language
stringclasses 1
value | extension
stringclasses 11
values | total_lines
int64 1
340k
| avg_line_length
float64 2.18
149k
| max_line_length
int64 7
2.22M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 6
65
| repo_stars
int64 100
47.3k
| repo_forks
int64 0
12k
| repo_open_issues
int64 0
3.4k
| repo_license
stringclasses 9
values | repo_extraction_date
stringclasses 92
values | exact_duplicates_redpajama
bool 2
classes | near_duplicates_redpajama
bool 2
classes | exact_duplicates_githubcode
bool 2
classes | exact_duplicates_stackv2
bool 1
class | exact_duplicates_stackv1
bool 2
classes | near_duplicates_githubcode
bool 2
classes | near_duplicates_stackv1
bool 2
classes | near_duplicates_stackv2
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3,994
|
publish.cpp
|
LizardByte_Sunshine/src/platform/linux/publish.cpp
|
/**
* @file src/platform/linux/publish.cpp
* @brief Definitions for publishing services on Linux.
* @note Adapted from https://www.avahi.org/doxygen/html/client-publish-service_8c-example.html
*/
#include <thread>
#include "misc.h"
#include "src/logging.h"
#include "src/network.h"
#include "src/nvhttp.h"
#include "src/platform/common.h"
#include "src/utility.h"
using namespace std::literals;
namespace avahi {
/**
 * @brief Error codes used by avahi.
 * @note Mirrors the AVAHI_ERR_* constants from avahi-common/error.h so this
 *       file can talk to a dynamically loaded libavahi without its headers.
 */
enum err_e {
  OK = 0,  ///< OK
  ERR_FAILURE = -1,  ///< Generic error code
  ERR_BAD_STATE = -2,  ///< Object was in a bad state
  ERR_INVALID_HOST_NAME = -3,  ///< Invalid host name
  ERR_INVALID_DOMAIN_NAME = -4,  ///< Invalid domain name
  ERR_NO_NETWORK = -5,  ///< No suitable network protocol available
  ERR_INVALID_TTL = -6,  ///< Invalid DNS TTL
  ERR_IS_PATTERN = -7,  ///< RR key is pattern
  ERR_COLLISION = -8,  ///< Name collision
  ERR_INVALID_RECORD = -9,  ///< Invalid RR
  ERR_INVALID_SERVICE_NAME = -10,  ///< Invalid service name
  ERR_INVALID_SERVICE_TYPE = -11,  ///< Invalid service type
  ERR_INVALID_PORT = -12,  ///< Invalid port number
  ERR_INVALID_KEY = -13,  ///< Invalid key
  ERR_INVALID_ADDRESS = -14,  ///< Invalid address
  ERR_TIMEOUT = -15,  ///< Timeout reached
  ERR_TOO_MANY_CLIENTS = -16,  ///< Too many clients
  ERR_TOO_MANY_OBJECTS = -17,  ///< Too many objects
  ERR_TOO_MANY_ENTRIES = -18,  ///< Too many entries
  ERR_OS = -19,  ///< OS error
  ERR_ACCESS_DENIED = -20,  ///< Access denied
  ERR_INVALID_OPERATION = -21,  ///< Invalid operation
  ERR_DBUS_ERROR = -22,  ///< An unexpected D-Bus error occurred
  ERR_DISCONNECTED = -23,  ///< Daemon connection failed
  ERR_NO_MEMORY = -24,  ///< Memory exhausted
  ERR_INVALID_OBJECT = -25,  ///< The object passed to this function was invalid
  ERR_NO_DAEMON = -26,  ///< Daemon not running
  ERR_INVALID_INTERFACE = -27,  ///< Invalid interface
  ERR_INVALID_PROTOCOL = -28,  ///< Invalid protocol
  ERR_INVALID_FLAGS = -29,  ///< Invalid flags
  ERR_NOT_FOUND = -30,  ///< Not found
  ERR_INVALID_CONFIG = -31,  ///< Configuration error
  ERR_VERSION_MISMATCH = -32,  ///< Version mismatch
  ERR_INVALID_SERVICE_SUBTYPE = -33,  ///< Invalid service subtype
  ERR_INVALID_PACKET = -34,  ///< Invalid packet
  ERR_INVALID_DNS_ERROR = -35,  ///< Invalid DNS return code
  ERR_DNS_FORMERR = -36,  ///< DNS Error: Form error
  ERR_DNS_SERVFAIL = -37,  ///< DNS Error: Server Failure
  ERR_DNS_NXDOMAIN = -38,  ///< DNS Error: No such domain
  ERR_DNS_NOTIMP = -39,  ///< DNS Error: Not implemented
  ERR_DNS_REFUSED = -40,  ///< DNS Error: Operation refused
  ERR_DNS_YXDOMAIN = -41,  ///< DNS Error: Name exists when it should not (YXDOMAIN)
  ERR_DNS_YXRRSET = -42,  ///< DNS Error: RRset exists when it should not (YXRRSET)
  ERR_DNS_NXRRSET = -43,  ///< DNS Error: RRset that should exist does not (NXRRSET)
  ERR_DNS_NOTAUTH = -44,  ///< DNS Error: Not authorized
  ERR_DNS_NOTZONE = -45,  ///< DNS Error: Name not contained in zone (NOTZONE)
  ERR_INVALID_RDATA = -46,  ///< Invalid RDATA
  ERR_INVALID_DNS_CLASS = -47,  ///< Invalid DNS class
  ERR_INVALID_DNS_TYPE = -48,  ///< Invalid DNS type
  ERR_NOT_SUPPORTED = -49,  ///< Not supported
  ERR_NOT_PERMITTED = -50,  ///< Operation not permitted
  ERR_INVALID_ARGUMENT = -51,  ///< Invalid argument
  ERR_IS_EMPTY = -52,  ///< Is empty
  ERR_NO_CHANGE = -53,  ///< The requested operation is invalid because it is redundant
  ERR_MAX = -54  ///< Sentinel: one past the last valid error code
};
// Mirror of AVAHI_IF_UNSPEC: matches any network interface index.
constexpr auto IF_UNSPEC = -1;

/**
 * @brief Network protocol family selector.
 */
enum proto {
  PROTO_INET = 0,  ///< IPv4
  PROTO_INET6 = 1,  ///< IPv6
  PROTO_UNSPEC = -1  ///< Unspecified/all protocol(s)
};

/**
 * @brief States of the avahi server (daemon).
 */
enum ServerState {
  SERVER_INVALID,  ///< Invalid state (initial)
  SERVER_REGISTERING,  ///< Host RRs are being registered
  SERVER_RUNNING,  ///< All host RRs have been established
  SERVER_COLLISION,  ///< There is a collision with a host RR. All host RRs have been withdrawn, the user should set a new host name via avahi_server_set_host_name()
  SERVER_FAILURE  ///< Some fatal failure happened, the server is unable to proceed
};

/**
 * @brief States of an avahi client object; the first three mirror the server states.
 */
enum ClientState {
  CLIENT_S_REGISTERING = SERVER_REGISTERING,  ///< Server state: REGISTERING
  CLIENT_S_RUNNING = SERVER_RUNNING,  ///< Server state: RUNNING
  CLIENT_S_COLLISION = SERVER_COLLISION,  ///< Server state: COLLISION
  CLIENT_FAILURE = 100,  ///< Some kind of error happened on the client side
  CLIENT_CONNECTING = 101  ///< We're still connecting. This state is only entered when AVAHI_CLIENT_NO_FAIL has been passed to avahi_client_new() and the daemon is not yet available.
};

/**
 * @brief States of an avahi entry group (a set of RRs published together).
 */
enum EntryGroupState {
  ENTRY_GROUP_UNCOMMITED,  ///< The group has not yet been committed, the user must still call avahi_entry_group_commit()
  ENTRY_GROUP_REGISTERING,  ///< The entries of the group are currently being registered
  ENTRY_GROUP_ESTABLISHED,  ///< The entries have successfully been established
  ENTRY_GROUP_COLLISION,  ///< A name collision for one of the entries in the group has been detected, the entries have been withdrawn
  ENTRY_GROUP_FAILURE  ///< Some kind of failure happened, the entries have been withdrawn
};

/**
 * @brief Flags accepted by avahi_client_new().
 */
enum ClientFlags {
  CLIENT_IGNORE_USER_CONFIG = 1,  ///< Don't read user configuration
  CLIENT_NO_FAIL = 2  ///< Don't fail if the daemon is not available when avahi_client_new() is called, instead enter CLIENT_CONNECTING state and wait for the daemon to appear
};
/**
 * @brief Flags for publishing functions.
 */
enum PublishFlags {
  PUBLISH_UNIQUE = 1,  ///< For raw records: The RRset is intended to be unique
  PUBLISH_NO_PROBE = 2,  ///< For raw records: Though the RRset is intended to be unique no probes shall be sent
  PUBLISH_NO_ANNOUNCE = 4,  ///< For raw records: Do not announce this RR to other hosts
  PUBLISH_ALLOW_MULTIPLE = 8,  ///< For raw records: Allow multiple local records of this type, even if they are intended to be unique
  PUBLISH_NO_REVERSE = 16,  ///< For address records: don't create a reverse (PTR) entry
  PUBLISH_NO_COOKIE = 32,  ///< For service records: do not implicitly add the local service cookie to TXT data
  PUBLISH_UPDATE = 64,  ///< Update existing records instead of adding new ones
  PUBLISH_USE_WIDE_AREA = 128,  ///< Register the record using wide area DNS (i.e. unicast DNS update)
  PUBLISH_USE_MULTICAST = 256  ///< Register the record using multicast DNS
};

using IfIndex = int;
using Protocol = int;

// Opaque handles into libavahi; only pointers to these are ever used here,
// so forward declarations suffice for dynamic loading.
struct EntryGroup;
struct Poll;
struct SimplePoll;
struct Client;

// Callback signatures matching the avahi C API.
typedef void (*ClientCallback)(Client *, ClientState, void *userdata);
typedef void (*EntryGroupCallback)(EntryGroup *g, EntryGroupState state, void *userdata);

// Function-pointer typedefs mirroring the avahi entry points that are
// resolved at runtime by init_common()/init_client().
typedef void (*free_fn)(void *);
typedef Client *(*client_new_fn)(const Poll *poll_api, ClientFlags flags, ClientCallback callback, void *userdata, int *error);
typedef void (*client_free_fn)(Client *);
typedef char *(*alternative_service_name_fn)(char *);
typedef Client *(*entry_group_get_client_fn)(EntryGroup *);
typedef EntryGroup *(*entry_group_new_fn)(Client *, EntryGroupCallback, void *userdata);
typedef int (*entry_group_add_service_fn)(
  EntryGroup *group,
  IfIndex interface,
  Protocol protocol,
  PublishFlags flags,
  const char *name,
  const char *type,
  const char *domain,
  const char *host,
  uint16_t port,
  ...);  // trailing varargs carry NULL-terminated TXT record strings
typedef int (*entry_group_is_empty_fn)(EntryGroup *);
typedef int (*entry_group_reset_fn)(EntryGroup *);
typedef int (*entry_group_commit_fn)(EntryGroup *);
typedef char *(*strdup_fn)(const char *);
typedef char *(*strerror_fn)(int);
typedef int (*client_errno_fn)(Client *);
typedef Poll *(*simple_poll_get_fn)(SimplePoll *);
typedef int (*simple_poll_loop_fn)(SimplePoll *);
typedef void (*simple_poll_quit_fn)(SimplePoll *);
typedef SimplePoll *(*simple_poll_new_fn)();
typedef void (*simple_poll_free_fn)(SimplePoll *);
// Function pointers into the dynamically loaded avahi libraries.
// All of these remain null until init_common()/init_client() succeed,
// so callers must check initialization before use (see deinit_t::~deinit_t).
free_fn free;
client_new_fn client_new;
client_free_fn client_free;
alternative_service_name_fn alternative_service_name;
entry_group_get_client_fn entry_group_get_client;
entry_group_new_fn entry_group_new;
entry_group_add_service_fn entry_group_add_service;
entry_group_is_empty_fn entry_group_is_empty;
entry_group_reset_fn entry_group_reset;
entry_group_commit_fn entry_group_commit;
strdup_fn strdup;
strerror_fn strerror;
client_errno_fn client_errno;
simple_poll_get_fn simple_poll_get;
simple_poll_loop_fn simple_poll_loop;
simple_poll_quit_fn simple_poll_quit;
simple_poll_new_fn simple_poll_new;
simple_poll_free_fn simple_poll_free;
int
init_common() {
static void *handle { nullptr };
static bool funcs_loaded = false;
if (funcs_loaded) return 0;
if (!handle) {
handle = dyn::handle({ "libavahi-common.so.3", "libavahi-common.so" });
if (!handle) {
return -1;
}
}
std::vector<std::tuple<dyn::apiproc *, const char *>> funcs {
{ (dyn::apiproc *) &alternative_service_name, "avahi_alternative_service_name" },
{ (dyn::apiproc *) &free, "avahi_free" },
{ (dyn::apiproc *) &strdup, "avahi_strdup" },
{ (dyn::apiproc *) &strerror, "avahi_strerror" },
{ (dyn::apiproc *) &simple_poll_get, "avahi_simple_poll_get" },
{ (dyn::apiproc *) &simple_poll_loop, "avahi_simple_poll_loop" },
{ (dyn::apiproc *) &simple_poll_quit, "avahi_simple_poll_quit" },
{ (dyn::apiproc *) &simple_poll_new, "avahi_simple_poll_new" },
{ (dyn::apiproc *) &simple_poll_free, "avahi_simple_poll_free" },
};
if (dyn::load(handle, funcs)) {
return -1;
}
funcs_loaded = true;
return 0;
}
int
init_client() {
if (init_common()) {
return -1;
}
static void *handle { nullptr };
static bool funcs_loaded = false;
if (funcs_loaded) return 0;
if (!handle) {
handle = dyn::handle({ "libavahi-client.so.3", "libavahi-client.so" });
if (!handle) {
return -1;
}
}
std::vector<std::tuple<dyn::apiproc *, const char *>> funcs {
{ (dyn::apiproc *) &client_new, "avahi_client_new" },
{ (dyn::apiproc *) &client_free, "avahi_client_free" },
{ (dyn::apiproc *) &entry_group_get_client, "avahi_entry_group_get_client" },
{ (dyn::apiproc *) &entry_group_new, "avahi_entry_group_new" },
{ (dyn::apiproc *) &entry_group_add_service, "avahi_entry_group_add_service" },
{ (dyn::apiproc *) &entry_group_is_empty, "avahi_entry_group_is_empty" },
{ (dyn::apiproc *) &entry_group_reset, "avahi_entry_group_reset" },
{ (dyn::apiproc *) &entry_group_commit, "avahi_entry_group_commit" },
{ (dyn::apiproc *) &client_errno, "avahi_client_errno" },
};
if (dyn::load(handle, funcs)) {
return -1;
}
funcs_loaded = true;
return 0;
}
} // namespace avahi
namespace platf::publish {
// Deleter adapter so util::safe_ptr releases avahi-allocated memory via avahi_free.
template <class T>
void
free(T *p) {
  avahi::free(p);
}

template <class T>
using ptr_t = util::safe_ptr<T, free<T>>;

// dyn_safe_ptr is used because the deleters are function pointers that are
// only valid after the avahi libraries have been dynamically loaded.
using client_t = util::dyn_safe_ptr<avahi::Client, &avahi::client_free>;
using poll_t = util::dyn_safe_ptr<avahi::SimplePoll, &avahi::simple_poll_free>;

// Global publishing state shared between the avahi callbacks below.
avahi::EntryGroup *group = nullptr;
poll_t poll;
client_t client;
ptr_t<char> name;  // advertised service instance name (avahi-allocated string)
void
create_services(avahi::Client *c);

/**
 * @brief Callback invoked by avahi whenever the entry group changes state.
 * @param g The entry group that changed state.
 * @param state The new state of the group.
 */
void
entry_group_callback(avahi::EntryGroup *g, avahi::EntryGroupState state, void *) {
  // Remember the group so client_callback()/create_services() can reuse it.
  group = g;

  if (state == avahi::ENTRY_GROUP_ESTABLISHED) {
    BOOST_LOG(info) << "Avahi service " << name.get() << " successfully established.";
  }
  else if (state == avahi::ENTRY_GROUP_COLLISION) {
    // Another host already claimed our name: pick an alternative and retry.
    name.reset(avahi::alternative_service_name(name.get()));
    BOOST_LOG(info) << "Avahi service name collision, renaming service to " << name.get();
    create_services(avahi::entry_group_get_client(g));
  }
  else if (state == avahi::ENTRY_GROUP_FAILURE) {
    BOOST_LOG(error) << "Avahi entry group failure: " << avahi::strerror(avahi::client_errno(avahi::entry_group_get_client(g)));
    avahi::simple_poll_quit(poll.get());
  }
  // ENTRY_GROUP_UNCOMMITED / ENTRY_GROUP_REGISTERING are transient; nothing to do.
}
/**
 * @brief Register the service with the avahi daemon.
 * @param c The avahi client to register against.
 *
 * Creates the entry group on first use, adds the service record, and commits
 * the group. On a local name collision the service is renamed and registration
 * retried recursively. Unless fg is disabled on a success path, any return
 * quits the poll loop.
 */
void
create_services(avahi::Client *c) {
  int ret;
  // Default to shutting down the poll loop; disabled only on success paths.
  auto fg = util::fail_guard([]() {
    avahi::simple_poll_quit(poll.get());
  });
  if (!group) {
    if (!(group = avahi::entry_group_new(c, entry_group_callback, nullptr))) {
      BOOST_LOG(error) << "avahi::entry_group_new() failed: "sv << avahi::strerror(avahi::client_errno(c));
      return;
    }
  }
  // Only (re-)add the service if the group holds no entries yet
  // (e.g. freshly created or reset after a collision).
  if (avahi::entry_group_is_empty(group)) {
    BOOST_LOG(info) << "Adding avahi service "sv << name.get();
    ret = avahi::entry_group_add_service(
      group,
      avahi::IF_UNSPEC, avahi::PROTO_UNSPEC,  // advertise on all interfaces and protocols
      avahi::PublishFlags(0),
      name.get(),
      SERVICE_TYPE,
      nullptr, nullptr,  // default domain and host
      net::map_port(nvhttp::PORT_HTTP),
      nullptr);  // NULL terminator for the TXT record varargs
    if (ret < 0) {
      if (ret == avahi::ERR_COLLISION) {
        // A service name collision with a local service happened. Let's pick a new name
        name.reset(avahi::alternative_service_name(name.get()));
        BOOST_LOG(info) << "Service name collision, renaming service to "sv << name.get();
        avahi::entry_group_reset(group);
        create_services(c);
        fg.disable();  // the recursive call owns the failure handling now
        return;
      }
      BOOST_LOG(error) << "Failed to add "sv << SERVICE_TYPE << " service: "sv << avahi::strerror(ret);
      return;
    }
    ret = avahi::entry_group_commit(group);
    if (ret < 0) {
      BOOST_LOG(error) << "Failed to commit entry group: "sv << avahi::strerror(ret);
      return;
    }
  }
  fg.disable();
}
/**
 * @brief Callback invoked by avahi when the client changes state.
 * @param c The avahi client.
 * @param state The new client state.
 */
void
client_callback(avahi::Client *c, avahi::ClientState state, void *) {
  if (state == avahi::CLIENT_S_RUNNING) {
    // Daemon is up and host records are established: publish our service.
    create_services(c);
  }
  else if (state == avahi::CLIENT_FAILURE) {
    BOOST_LOG(error) << "Client failure: "sv << avahi::strerror(avahi::client_errno(c));
    avahi::simple_poll_quit(poll.get());
  }
  else if (state == avahi::CLIENT_S_COLLISION || state == avahi::CLIENT_S_REGISTERING) {
    // Host records are being (re-)registered; withdraw our entries until RUNNING.
    if (group) {
      avahi::entry_group_reset(group);
    }
  }
  // CLIENT_CONNECTING: still waiting for the daemon; nothing to do.
}
class deinit_t: public ::platf::deinit_t {
public:
std::thread poll_thread;
deinit_t(std::thread poll_thread):
poll_thread { std::move(poll_thread) } {}
~deinit_t() override {
if (avahi::simple_poll_quit && poll) {
avahi::simple_poll_quit(poll.get());
}
if (poll_thread.joinable()) {
poll_thread.join();
}
}
};
[[nodiscard]] std::unique_ptr<::platf::deinit_t>
start() {
if (avahi::init_client()) {
return nullptr;
}
int avhi_error;
poll.reset(avahi::simple_poll_new());
if (!poll) {
BOOST_LOG(error) << "Failed to create simple poll object."sv;
return nullptr;
}
auto instance_name = net::mdns_instance_name(platf::get_host_name());
name.reset(avahi::strdup(instance_name.c_str()));
client.reset(
avahi::client_new(avahi::simple_poll_get(poll.get()), avahi::ClientFlags(0), client_callback, nullptr, &avhi_error));
if (!client) {
BOOST_LOG(error) << "Failed to create client: "sv << avahi::strerror(avhi_error);
return nullptr;
}
return std::make_unique<deinit_t>(std::thread { avahi::simple_poll_loop, poll.get() });
}
} // namespace platf::publish
| 15,504
|
C++
|
.cpp
| 367
| 36.904632
| 185
| 0.645997
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
3,995
|
kmsgrab.cpp
|
LizardByte_Sunshine/src/platform/linux/kmsgrab.cpp
|
/**
* @file src/platform/linux/kmsgrab.cpp
* @brief Definitions for KMS screen capture.
*/
#include <drm_fourcc.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/dma-buf.h>
#include <sys/capability.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <filesystem>
#include <thread>
#include "src/config.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/round_robin.h"
#include "src/utility.h"
#include "src/video.h"
#include "cuda.h"
#include "graphics.h"
#include "vaapi.h"
#include "wayland.h"
using namespace std::literals;
namespace fs = std::filesystem;
namespace platf {
namespace kms {
/**
 * @brief RAII helper that raises CAP_SYS_ADMIN for the lifetime of the object.
 *
 * CAP_SYS_ADMIN is required by some DRM ioctls (e.g. accessing framebuffer
 * handles). The constructor enables the capability, the destructor clears it
 * and releases the capability state.
 */
class cap_sys_admin {
public:
  cap_sys_admin() {
    caps = cap_get_proc();
    cap_value_t sys_admin = CAP_SYS_ADMIN;
    // Guard against cap_get_proc() failing (returns NULL) before using caps.
    if (!caps || cap_set_flag(caps, CAP_EFFECTIVE, 1, &sys_admin, CAP_SET) || cap_set_proc(caps)) {
      BOOST_LOG(error) << "Failed to gain CAP_SYS_ADMIN";
    }
  }

  ~cap_sys_admin() {
    cap_value_t sys_admin = CAP_SYS_ADMIN;
    if (!caps || cap_set_flag(caps, CAP_EFFECTIVE, 1, &sys_admin, CAP_CLEAR) || cap_set_proc(caps)) {
      BOOST_LOG(error) << "Failed to drop CAP_SYS_ADMIN";
    }
    // Only free a valid capability state.
    if (caps) {
      cap_free(caps);
    }
  }

  cap_t caps;
};
/**
 * @brief Uniform wrapper over legacy (drmModeFB) and modern (drmModeFB2) framebuffer info.
 *
 * Normalizes both variants into a common set of fields (format, modifier,
 * handles/pitches/offsets per plane) and frees the wrapped structure on
 * destruction.
 */
class wrapper_fb {
public:
  wrapper_fb(drmModeFB *fb):
    fb { fb }, fb_id { fb->fb_id }, width { fb->width }, height { fb->height } {
    // Legacy FBs carry no format/modifier info; assume XRGB8888, single plane.
    pixel_format = DRM_FORMAT_XRGB8888;
    modifier = DRM_FORMAT_MOD_INVALID;
    std::fill_n(handles, 4, 0);
    std::fill_n(pitches, 4, 0);
    std::fill_n(offsets, 4, 0);
    handles[0] = fb->handle;
    pitches[0] = fb->pitch;
  }

  wrapper_fb(drmModeFB2 *fb2):
    fb2 { fb2 }, fb_id { fb2->fb_id }, width { fb2->width }, height { fb2->height } {
    pixel_format = fb2->pixel_format;
    // The modifier field is only meaningful when the driver sets the flag.
    modifier = (fb2->flags & DRM_MODE_FB_MODIFIERS) ? fb2->modifier : DRM_FORMAT_MOD_INVALID;
    memcpy(handles, fb2->handles, sizeof(handles));
    memcpy(pitches, fb2->pitches, sizeof(pitches));
    memcpy(offsets, fb2->offsets, sizeof(offsets));
  }

  // Non-copyable/non-movable: the destructor frees fb/fb2, so an implicitly
  // generated copy would lead to a double-free. Instances are held via
  // std::unique_ptr (see fb_t).
  wrapper_fb(const wrapper_fb &) = delete;
  wrapper_fb &operator=(const wrapper_fb &) = delete;

  ~wrapper_fb() {
    if (fb) {
      drmModeFreeFB(fb);
    }
    else if (fb2) {
      drmModeFreeFB2(fb2);
    }
  }

  drmModeFB *fb = nullptr;
  drmModeFB2 *fb2 = nullptr;
  uint32_t fb_id;
  uint32_t width;
  uint32_t height;
  uint32_t pixel_format;
  uint64_t modifier;
  uint32_t handles[4];
  uint32_t pitches[4];
  uint32_t offsets[4];
};
// RAII aliases pairing each libdrm object with its matching free function.
using plane_res_t = util::safe_ptr<drmModePlaneRes, drmModeFreePlaneResources>;
using encoder_t = util::safe_ptr<drmModeEncoder, drmModeFreeEncoder>;
using res_t = util::safe_ptr<drmModeRes, drmModeFreeResources>;
using plane_t = util::safe_ptr<drmModePlane, drmModeFreePlane>;
using fb_t = std::unique_ptr<wrapper_fb>;
using crtc_t = util::safe_ptr<drmModeCrtc, drmModeFreeCrtc>;
using obj_prop_t = util::safe_ptr<drmModeObjectProperties, drmModeFreeObjectProperties>;
using prop_t = util::safe_ptr<drmModePropertyRes, drmModeFreeProperty>;
using prop_blob_t = util::safe_ptr<drmModePropertyBlobRes, drmModeFreePropertyBlob>;
using version_t = util::safe_ptr<drmVersion, drmFreeVersion>;

// Counts connectors per connector type (e.g. how many HDMI-A ports were seen).
using conn_type_count_t = std::map<std::uint32_t, std::uint32_t>;

// Overall desktop environment dimensions; written elsewhere in this file.
static int env_width;
static int env_height;
/**
 * @brief Translate a DRM plane "type" property value into a readable name.
 * @param val Value of the plane type property.
 * @return Name of the plane type, or "UNKNOWN" for unrecognized values.
 */
std::string_view
plane_type(std::uint64_t val) {
  if (val == DRM_PLANE_TYPE_OVERLAY) {
    return "DRM_PLANE_TYPE_OVERLAY"sv;
  }
  if (val == DRM_PLANE_TYPE_PRIMARY) {
    return "DRM_PLANE_TYPE_PRIMARY"sv;
  }
  if (val == DRM_PLANE_TYPE_CURSOR) {
    return "DRM_PLANE_TYPE_CURSOR"sv;
  }
  return "UNKNOWN"sv;
}
/**
 * @brief A DRM connector (physical output port) as enumerated from a card.
 */
struct connector_t {
  // For example: HDMI-A or HDMI
  std::uint32_t type;
  // Equals zero if not applicable
  std::uint32_t crtc_id;
  // For example HDMI-A-{index} or HDMI-{index}
  std::uint32_t index;
  // ID of the connector
  std::uint32_t connector_id;
  // Whether a display is currently attached to this connector.
  bool connected;
};

/**
 * @brief A logical monitor with its position in the desktop layout.
 */
struct monitor_t {
  // Connector attributes
  std::uint32_t type;
  std::uint32_t index;
  // Monitor index in the global list
  std::uint32_t monitor_index;
  // Position and size of this monitor within the overall desktop.
  platf::touch_port_t viewport;
};

/**
 * @brief Cached mapping data for one /dev/dri/card* node.
 */
struct card_descriptor_t {
  std::string path;
  std::map<std::uint32_t, monitor_t> crtc_to_monitor;
};

// One descriptor per card node; populated elsewhere in this file.
static std::vector<card_descriptor_t> card_descriptors;
/**
 * @brief Convert a connector type name (as reported by various compositors)
 *        to its DRM_MODE_CONNECTOR_* numeric value.
 * @param string The connector type name, e.g. "HDMI-A" or "DP".
 * @return The matching DRM connector type, a raw value parsed from
 *         "Unknown<N>" strings, or DRM_MODE_CONNECTOR_Unknown.
 */
static std::uint32_t
from_view(const std::string_view &string) {
// Early-returns the DRM constant when the name matches; several aliases map
// to the same constant because different sources spell them differently.
#define _CONVERT(x, y) \
  if (string == x) return DRM_MODE_CONNECTOR_##y
  // This list was created from the following sources:
  // https://gitlab.freedesktop.org/mesa/drm/-/blob/main/xf86drmMode.c (drmModeGetConnectorTypeName)
  // https://gitlab.freedesktop.org/wayland/weston/-/blob/e74f2897b9408b6356a555a0ce59146836307ff5/libweston/backend-drm/drm.c#L1458-1477
  // https://github.com/GNOME/mutter/blob/65d481594227ea7188c0416e8e00b57caeea214f/src/backends/meta-monitor-manager.c#L1618-L1639
  _CONVERT("VGA"sv, VGA);
  _CONVERT("DVII"sv, DVII);
  _CONVERT("DVI-I"sv, DVII);
  _CONVERT("DVID"sv, DVID);
  _CONVERT("DVI-D"sv, DVID);
  _CONVERT("DVIA"sv, DVIA);
  _CONVERT("DVI-A"sv, DVIA);
  _CONVERT("Composite"sv, Composite);
  _CONVERT("SVIDEO"sv, SVIDEO);
  _CONVERT("S-Video"sv, SVIDEO);
  _CONVERT("LVDS"sv, LVDS);
  _CONVERT("Component"sv, Component);
  _CONVERT("9PinDIN"sv, 9PinDIN);
  _CONVERT("DIN"sv, 9PinDIN);
  _CONVERT("DisplayPort"sv, DisplayPort);
  _CONVERT("DP"sv, DisplayPort);
  _CONVERT("HDMIA"sv, HDMIA);
  _CONVERT("HDMI-A"sv, HDMIA);
  _CONVERT("HDMI"sv, HDMIA);
  _CONVERT("HDMIB"sv, HDMIB);
  _CONVERT("HDMI-B"sv, HDMIB);
  _CONVERT("TV"sv, TV);
  _CONVERT("eDP"sv, eDP);
  _CONVERT("VIRTUAL"sv, VIRTUAL);
  _CONVERT("Virtual"sv, VIRTUAL);
  _CONVERT("DSI"sv, DSI);
  _CONVERT("DPI"sv, DPI);
  _CONVERT("WRITEBACK"sv, WRITEBACK);
  _CONVERT("Writeback"sv, WRITEBACK);
  _CONVERT("SPI"sv, SPI);
#ifdef DRM_MODE_CONNECTOR_USB
  _CONVERT("USB"sv, USB);
#endif
  // If the string starts with "Unknown", it may have the raw type
  // value appended to the string. Let's try to read it.
  if (string.find("Unknown"sv) == 0) {
    std::uint32_t type;
    // sscanf needs a NUL-terminated buffer; string_view doesn't guarantee one.
    std::string null_terminated_string { string };
    if (std::sscanf(null_terminated_string.c_str(), "Unknown%u", &type) == 1) {
      return type;
    }
  }
  BOOST_LOG(error) << "Unknown Monitor connector type ["sv << string << "]: Please report this to the GitHub issue tracker"sv;
  return DRM_MODE_CONNECTOR_Unknown;
}
/**
 * @brief Iterator over the valid planes of a card.
 *
 * Walks the raw plane-ID array from drmModeGetPlaneResources, skipping IDs
 * for which drmModeGetPlane fails. The current plane object is cached in a
 * shared pointer so copies of the iterator stay cheap.
 */
class plane_it_t: public round_robin_util::it_wrap_t<plane_t::element_type, plane_it_t> {
public:
  /// Begin-style constructor: positions the iterator on the first valid plane.
  plane_it_t(int fd, std::uint32_t *plane_p, std::uint32_t *end):
    fd { fd }, plane_p { plane_p }, end { end } {
    load_next_valid_plane();
  }

  /// End-style constructor: an iterator already at the end of the ID array.
  plane_it_t(int fd, std::uint32_t *end):
    fd { fd }, plane_p { end }, end { end } {}

  /// Advance plane_p until a plane can be fetched, caching it in this->plane.
  void
  load_next_valid_plane() {
    this->plane.reset();
    for (; plane_p != end; ++plane_p) {
      plane_t plane = drmModeGetPlane(fd, *plane_p);
      if (!plane) {
        BOOST_LOG(error) << "Couldn't get drm plane ["sv << (end - plane_p) << "]: "sv << strerror(errno);
        continue;
      }
      this->plane = util::make_shared<plane_t>(plane.release());
      break;
    }
  }

  void
  inc() {
    ++plane_p;
    load_next_valid_plane();
  }

  bool
  eq(const plane_it_t &other) const {
    // Comparing positions is sufficient; both iterators share the same array.
    return plane_p == other.plane_p;
  }

  plane_t::pointer
  get() {
    return plane.get();
  }

  int fd;  // DRM card file descriptor (not owned)
  std::uint32_t *plane_p;  // current position in the plane-ID array
  std::uint32_t *end;  // one past the last plane ID
  util::shared_t<plane_t> plane;  // cached plane object for the current position
};
/**
 * @brief State of the hardware cursor plane, used to blend the cursor into captures.
 */
struct cursor_t {
  // Public properties used during blending
  bool visible = false;
  std::int32_t x, y;  // cursor position on screen
  std::uint32_t dst_w, dst_h;  // size as displayed
  std::uint32_t src_w, src_h;  // size of the source image
  std::vector<std::uint8_t> pixels;  // cursor image data
  unsigned long serial;  // bumped when the cursor image changes
  // Private properties used for tracking cursor changes
  std::uint64_t prop_src_x, prop_src_y, prop_src_w, prop_src_h;
  std::uint32_t fb_id;
};
/**
 * @brief Handle to one /dev/dri/card* node plus its render node and plane list.
 *
 * Owns the card and render file descriptors (via file_t) and provides typed
 * accessors over the libdrm query API. Iterating a card_t yields its valid
 * planes (see plane_it_t).
 */
class card_t {
public:
  using connector_interal_t = util::safe_ptr<drmModeConnector, drmModeFreeConnector>;

  /**
   * @brief Open the card node and prepare it for capture.
   * @param path Path to the card node, e.g. "/dev/dri/card0".
   * @return 0 on success, -1 on failure.
   */
  int
  init(const char *path) {
    // CAP_SYS_ADMIN is held for the duration of this call (RAII).
    cap_sys_admin admin;
    fd.el = open(path, O_RDWR);
    if (fd.el < 0) {
      BOOST_LOG(error) << "Couldn't open: "sv << path << ": "sv << strerror(errno);
      return -1;
    }
    version_t ver { drmGetVersion(fd.el) };
    BOOST_LOG(info) << path << " -> "sv << ((ver && ver->name) ? ver->name : "UNKNOWN");
    // Open the render node for this card to share with libva.
    // If it fails, we'll just share the primary node instead.
    char *rendernode_path = drmGetRenderDeviceNameFromFd(fd.el);
    if (rendernode_path) {
      BOOST_LOG(debug) << "Opening render node: "sv << rendernode_path;
      render_fd.el = open(rendernode_path, O_RDWR);
      if (render_fd.el < 0) {
        BOOST_LOG(warning) << "Couldn't open render node: "sv << rendernode_path << ": "sv << strerror(errno);
        render_fd.el = dup(fd.el);
      }
      free(rendernode_path);
    }
    else {
      BOOST_LOG(warning) << "No render device name for: "sv << path;
      render_fd.el = dup(fd.el);
    }
    // Universal planes are required to enumerate all planes; atomic
    // mode-setting is only needed for cursor capture, so it merely warns.
    if (drmSetClientCap(fd.el, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1)) {
      BOOST_LOG(error) << "GPU driver doesn't support universal planes: "sv << path;
      return -1;
    }
    if (drmSetClientCap(fd.el, DRM_CLIENT_CAP_ATOMIC, 1)) {
      BOOST_LOG(warning) << "GPU driver doesn't support atomic mode-setting: "sv << path;
#if defined(SUNSHINE_BUILD_X11)
      // We won't be able to capture the mouse cursor with KMS on non-atomic drivers,
      // so fall back to X11 if it's available and the user didn't explicitly force KMS.
      if (window_system == window_system_e::X11 && config::video.capture != "kms") {
        BOOST_LOG(info) << "Avoiding KMS capture under X11 due to lack of atomic mode-setting"sv;
        return -1;
      }
#endif
      BOOST_LOG(warning) << "Cursor capture may fail without atomic mode-setting support!"sv;
    }
    plane_res.reset(drmModeGetPlaneResources(fd.el));
    if (!plane_res) {
      BOOST_LOG(error) << "Couldn't get drm plane resources"sv;
      return -1;
    }
    return 0;
  }

  /**
   * @brief Fetch framebuffer info for a plane, preferring the FB2 API.
   * @return Wrapped framebuffer, or nullptr if neither API succeeds.
   */
  fb_t
  fb(plane_t::pointer plane) {
    // Reading FB handles requires CAP_SYS_ADMIN.
    cap_sys_admin admin;
    auto fb2 = drmModeGetFB2(fd.el, plane->fb_id);
    if (fb2) {
      return std::make_unique<wrapper_fb>(fb2);
    }
    auto fb = drmModeGetFB(fd.el, plane->fb_id);
    if (fb) {
      return std::make_unique<wrapper_fb>(fb);
    }
    return nullptr;
  }

  crtc_t
  crtc(std::uint32_t id) {
    return drmModeGetCrtc(fd.el, id);
  }

  encoder_t
  encoder(std::uint32_t id) {
    return drmModeGetEncoder(fd.el, id);
  }

  res_t
  res() {
    return drmModeGetResources(fd.el);
  }

  /// @brief Check whether the card is driven by the proprietary nvidia-drm driver.
  bool
  is_nvidia() {
    version_t ver { drmGetVersion(fd.el) };
    return ver && ver->name && strncmp(ver->name, "nvidia-drm", 10) == 0;
  }

  /// @brief Check whether the given plane is a cursor plane via its "type" property.
  bool
  is_cursor(std::uint32_t plane_id) {
    auto props = plane_props(plane_id);
    for (auto &[prop, val] : props) {
      if (prop->name == "type"sv) {
        if (val == DRM_PLANE_TYPE_CURSOR) {
          return true;
        }
        else {
          return false;
        }
      }
    }
    return false;
  }

  /// @brief Look up a property value by name in a property list; nullopt if absent.
  std::optional<std::uint64_t>
  prop_value_by_name(const std::vector<std::pair<prop_t, std::uint64_t>> &props, std::string_view name) {
    for (auto &[prop, val] : props) {
      if (prop->name == name) {
        return val;
      }
    }
    return std::nullopt;
  }

  /**
   * @brief Read the plane's "rotation" property.
   * @return The rotation value, or DRM_MODE_ROTATE_0 (landscape) if unavailable.
   */
  std::uint32_t
  get_panel_orientation(std::uint32_t plane_id) {
    auto props = plane_props(plane_id);
    auto value = prop_value_by_name(props, "rotation"sv);
    if (value) {
      return *value;
    }
    BOOST_LOG(error) << "Failed to determine panel orientation, defaulting to landscape.";
    return DRM_MODE_ROTATE_0;
  }

  /// @brief Map a CRTC object ID to its index in the resources array; -1 if not found.
  int
  get_crtc_index_by_id(std::uint32_t crtc_id) {
    auto resources = res();
    for (int i = 0; i < resources->count_crtcs; i++) {
      if (resources->crtcs[i] == crtc_id) {
        return i;
      }
    }
    return -1;
  }

  connector_interal_t
  connector(std::uint32_t id) {
    return drmModeGetConnector(fd.el, id);
  }

  /**
   * @brief Enumerate all connectors on this card.
   * @param conn_type_count Running per-type counter; incremented for each
   *        connector so indices (HDMI-1, HDMI-2, ...) stay globally consistent.
   */
  std::vector<connector_t>
  monitors(conn_type_count_t &conn_type_count) {
    auto resources = res();
    if (!resources) {
      BOOST_LOG(error) << "Couldn't get connector resources"sv;
      return {};
    }
    std::vector<connector_t> monitors;
    std::for_each_n(resources->connectors, resources->count_connectors, [this, &conn_type_count, &monitors](std::uint32_t id) {
      auto conn = connector(id);
      // The active CRTC (if any) is reached through the connector's encoder.
      std::uint32_t crtc_id = 0;
      if (conn->encoder_id) {
        auto enc = encoder(conn->encoder_id);
        if (enc) {
          crtc_id = enc->crtc_id;
        }
      }
      auto index = ++conn_type_count[conn->connector_type];
      monitors.emplace_back(connector_t {
        conn->connector_type,
        crtc_id,
        index,
        conn->connector_id,
        conn->connection == DRM_MODE_CONNECTED,
      });
    });
    return monitors;
  }

  /**
   * @brief Export a GEM handle as a DMA-BUF file descriptor.
   * @return The fd wrapper, or an empty file_t on failure.
   */
  file_t
  handleFD(std::uint32_t handle) {
    file_t fb_fd;
    auto status = drmPrimeHandleToFD(fd.el, handle, 0 /* flags */, &fb_fd.el);
    if (status) {
      return {};
    }
    return fb_fd;
  }

  /// @brief Fetch all properties of a DRM object of the given type.
  std::vector<std::pair<prop_t, std::uint64_t>>
  props(std::uint32_t id, std::uint32_t type) {
    obj_prop_t obj_prop = drmModeObjectGetProperties(fd.el, id, type);
    if (!obj_prop) {
      return {};
    }
    std::vector<std::pair<prop_t, std::uint64_t>> props;
    props.reserve(obj_prop->count_props);
    for (auto x = 0; x < obj_prop->count_props; ++x) {
      props.emplace_back(drmModeGetProperty(fd.el, obj_prop->props[x]), obj_prop->prop_values[x]);
    }
    return props;
  }

  std::vector<std::pair<prop_t, std::uint64_t>>
  plane_props(std::uint32_t id) {
    return props(id, DRM_MODE_OBJECT_PLANE);
  }

  std::vector<std::pair<prop_t, std::uint64_t>>
  crtc_props(std::uint32_t id) {
    return props(id, DRM_MODE_OBJECT_CRTC);
  }

  std::vector<std::pair<prop_t, std::uint64_t>>
  connector_props(std::uint32_t id) {
    return props(id, DRM_MODE_OBJECT_CONNECTOR);
  }

  plane_t
  operator[](std::uint32_t index) {
    return drmModeGetPlane(fd.el, plane_res->planes[index]);
  }

  std::uint32_t
  count() {
    return plane_res->count_planes;
  }

  // Iteration yields only planes that could actually be fetched (see plane_it_t).
  plane_it_t
  begin() const {
    return plane_it_t { fd.el, plane_res->planes, plane_res->planes + plane_res->count_planes };
  }

  plane_it_t
  end() const {
    return plane_it_t { fd.el, plane_res->planes + plane_res->count_planes };
  }

  file_t fd;  // primary card node
  file_t render_fd;  // render node (or a dup of fd when unavailable)
  plane_res_t plane_res;  // plane ID list from drmModeGetPlaneResources
};
std::map<std::uint32_t, monitor_t>
map_crtc_to_monitor(const std::vector<connector_t> &connectors) {
std::map<std::uint32_t, monitor_t> result;
for (auto &connector : connectors) {
result.emplace(connector.crtc_id,
monitor_t {
connector.type,
connector.index,
});
}
return result;
}
/**
 * @brief Image whose pixel buffer was allocated with new[]; frees it on destruction.
 */
struct kms_img_t: public img_t {
  ~kms_img_t() override {
    delete[] data;
    data = nullptr;  // guard against double-free by a base-class destructor path
  }
};
/**
 * @brief Log debug information about a plane, its framebuffer, and its CRTC.
 * @param plane The plane to describe (must not be null).
 * @param fb The plane's framebuffer (must not be null).
 * @param crtc The CRTC the plane is attached to; may be null.
 */
void
print(plane_t::pointer plane, fb_t::pointer fb, crtc_t::pointer crtc) {
  if (crtc) {
    BOOST_LOG(debug) << "crtc("sv << crtc->x << ", "sv << crtc->y << ')';
    BOOST_LOG(debug) << "crtc("sv << crtc->width << ", "sv << crtc->height << ')';
    BOOST_LOG(debug) << "plane->possible_crtcs == "sv << plane->possible_crtcs;
  }
  BOOST_LOG(debug)
    << "x("sv << plane->x
    << ") y("sv << plane->y
    << ") crtc_x("sv << plane->crtc_x
    << ") crtc_y("sv << plane->crtc_y
    << ") crtc_id("sv << plane->crtc_id
    << ')';
  BOOST_LOG(debug)
    << "Resolution: "sv << fb->width << 'x' << fb->height
    << ": Pitch: "sv << fb->pitches[0]
    << ": Offset: "sv << fb->offsets[0];
  // Guard against planes that report zero supported formats: the loop below
  // iterates count_formats - 1 entries and then reads formats[count_formats - 1],
  // both of which would be out of bounds (and the subtraction underflows).
  if (plane->count_formats == 0) {
    BOOST_LOG(debug) << "Format []"sv;
    return;
  }
  std::stringstream ss;
  ss << "Format ["sv;
  // Print all but the last format with a trailing separator.
  std::for_each_n(plane->formats, plane->count_formats - 1, [&ss](auto format) {
    ss << util::view(format) << ", "sv;
  });
  ss << util::view(plane->formats[plane->count_formats - 1]) << ']';
  BOOST_LOG(debug) << ss.str();
}
class display_t: public platf::display_t {
public:
display_t(mem_type_e mem_type):
platf::display_t(), mem_type { mem_type } {}
int
init(const std::string &display_name, const ::video::config_t &config) {
delay = std::chrono::nanoseconds { 1s } / config.framerate;
int monitor_index = util::from_view(display_name);
int monitor = 0;
fs::path card_dir { "/dev/dri"sv };
for (auto &entry : fs::directory_iterator { card_dir }) {
auto file = entry.path().filename();
auto filestring = file.generic_string();
if (filestring.size() < 4 || std::string_view { filestring }.substr(0, 4) != "card"sv) {
continue;
}
kms::card_t card;
if (card.init(entry.path().c_str())) {
continue;
}
// Skip non-Nvidia cards if we're looking for CUDA devices
// unless NVENC is selected manually by the user
if (mem_type == mem_type_e::cuda && !card.is_nvidia()) {
BOOST_LOG(debug) << file << " is not a CUDA device"sv;
if (config::video.encoder != "nvenc") {
continue;
}
}
auto end = std::end(card);
for (auto plane = std::begin(card); plane != end; ++plane) {
// Skip unused planes
if (!plane->fb_id) {
continue;
}
if (card.is_cursor(plane->plane_id)) {
continue;
}
if (monitor != monitor_index) {
++monitor;
continue;
}
auto fb = card.fb(plane.get());
if (!fb) {
BOOST_LOG(error) << "Couldn't get drm fb for plane ["sv << plane->fb_id << "]: "sv << strerror(errno);
return -1;
}
if (!fb->handles[0]) {
BOOST_LOG(error) << "Couldn't get handle for DRM Framebuffer ["sv << plane->fb_id << "]: Probably not permitted"sv;
return -1;
}
for (int i = 0; i < 4; ++i) {
if (!fb->handles[i]) {
break;
}
auto fb_fd = card.handleFD(fb->handles[i]);
if (fb_fd.el < 0) {
BOOST_LOG(error) << "Couldn't get primary file descriptor for Framebuffer ["sv << fb->fb_id << "]: "sv << strerror(errno);
continue;
}
}
auto crtc = card.crtc(plane->crtc_id);
if (!crtc) {
BOOST_LOG(error) << "Couldn't get CRTC info: "sv << strerror(errno);
continue;
}
BOOST_LOG(info) << "Found monitor for DRM screencasting"sv;
// We need to find the correct /dev/dri/card{nr} to correlate the crtc_id with the monitor descriptor
auto pos = std::find_if(std::begin(card_descriptors), std::end(card_descriptors), [&](card_descriptor_t &cd) {
return cd.path == filestring;
});
if (pos == std::end(card_descriptors)) {
// This code path shouldn't happen, but it's there just in case.
// card_descriptors is part of the guesswork after all.
BOOST_LOG(error) << "Couldn't find ["sv << entry.path() << "]: This shouldn't have happened :/"sv;
return -1;
}
// TODO: surf_sd = fb->to_sd();
kms::print(plane.get(), fb.get(), crtc.get());
img_width = fb->width;
img_height = fb->height;
img_offset_x = crtc->x;
img_offset_y = crtc->y;
this->env_width = ::platf::kms::env_width;
this->env_height = ::platf::kms::env_height;
auto monitor = pos->crtc_to_monitor.find(plane->crtc_id);
if (monitor != std::end(pos->crtc_to_monitor)) {
auto &viewport = monitor->second.viewport;
width = viewport.width;
height = viewport.height;
switch (card.get_panel_orientation(plane->plane_id)) {
case DRM_MODE_ROTATE_270:
BOOST_LOG(debug) << "Detected panel orientation at 90, swapping width and height.";
width = viewport.height;
height = viewport.width;
break;
case DRM_MODE_ROTATE_90:
case DRM_MODE_ROTATE_180:
BOOST_LOG(warning) << "Panel orientation is unsupported, screen capture may not work correctly.";
break;
}
offset_x = viewport.offset_x;
offset_y = viewport.offset_y;
}
// This code path shouldn't happen, but it's there just in case.
// crtc_to_monitor is part of the guesswork after all.
else {
BOOST_LOG(warning) << "Couldn't find crtc_id, this shouldn't have happened :\\"sv;
width = crtc->width;
height = crtc->height;
offset_x = crtc->x;
offset_y = crtc->y;
}
plane_id = plane->plane_id;
crtc_id = plane->crtc_id;
crtc_index = card.get_crtc_index_by_id(plane->crtc_id);
// Find the connector for this CRTC
kms::conn_type_count_t conn_type_count;
for (auto &connector : card.monitors(conn_type_count)) {
if (connector.crtc_id == crtc_id) {
BOOST_LOG(info) << "Found connector ID ["sv << connector.connector_id << ']';
connector_id = connector.connector_id;
auto connector_props = card.connector_props(*connector_id);
hdr_metadata_blob_id = card.prop_value_by_name(connector_props, "HDR_OUTPUT_METADATA"sv);
}
}
this->card = std::move(card);
goto break_loop;
}
}
BOOST_LOG(error) << "Couldn't find monitor ["sv << monitor_index << ']';
return -1;
// Neatly break from nested for loop
break_loop:
// Look for the cursor plane for this CRTC
cursor_plane_id = -1;
auto end = std::end(card);
for (auto plane = std::begin(card); plane != end; ++plane) {
if (!card.is_cursor(plane->plane_id)) {
continue;
}
// NB: We do not skip unused planes here because cursor planes
// will look unused if the cursor is currently hidden.
if (!(plane->possible_crtcs & (1 << crtc_index))) {
// Skip cursor planes for other CRTCs
continue;
}
else if (plane->possible_crtcs != (1 << crtc_index)) {
// We assume a 1:1 mapping between cursor planes and CRTCs, which seems to
// match the behavior of drivers in the real world. If it's violated, we'll
// proceed anyway but print a warning in the log.
BOOST_LOG(warning) << "Cursor plane spans multiple CRTCs!"sv;
}
BOOST_LOG(info) << "Found cursor plane ["sv << plane->plane_id << ']';
cursor_plane_id = plane->plane_id;
break;
}
if (cursor_plane_id < 0) {
BOOST_LOG(warning) << "No KMS cursor plane found. Cursor may not be displayed while streaming!"sv;
}
return 0;
}
bool
is_hdr() {
if (!hdr_metadata_blob_id || *hdr_metadata_blob_id == 0) {
return false;
}
prop_blob_t hdr_metadata_blob = drmModeGetPropertyBlob(card.fd.el, *hdr_metadata_blob_id);
if (hdr_metadata_blob == nullptr) {
BOOST_LOG(error) << "Unable to get HDR metadata blob: "sv << strerror(errno);
return false;
}
if (hdr_metadata_blob->length < sizeof(uint32_t) + sizeof(hdr_metadata_infoframe)) {
BOOST_LOG(error) << "HDR metadata blob is too small: "sv << hdr_metadata_blob->length;
return false;
}
auto raw_metadata = (hdr_output_metadata *) hdr_metadata_blob->data;
if (raw_metadata->metadata_type != 0) { // HDMI_STATIC_METADATA_TYPE1
BOOST_LOG(error) << "Unknown HDMI_STATIC_METADATA_TYPE value: "sv << raw_metadata->metadata_type;
return false;
}
if (raw_metadata->hdmi_metadata_type1.metadata_type != 0) { // Static Metadata Type 1
BOOST_LOG(error) << "Unknown secondary metadata type value: "sv << raw_metadata->hdmi_metadata_type1.metadata_type;
return false;
}
// We only support Traditional Gamma SDR or SMPTE 2084 PQ HDR EOTFs.
// Print a warning if we encounter any others.
switch (raw_metadata->hdmi_metadata_type1.eotf) {
case 0: // HDMI_EOTF_TRADITIONAL_GAMMA_SDR
return false;
case 1: // HDMI_EOTF_TRADITIONAL_GAMMA_HDR
BOOST_LOG(warning) << "Unsupported HDR EOTF: Traditional Gamma"sv;
return true;
case 2: // HDMI_EOTF_SMPTE_ST2084
return true;
case 3: // HDMI_EOTF_BT_2100_HLG
BOOST_LOG(warning) << "Unsupported HDR EOTF: HLG"sv;
return true;
default:
BOOST_LOG(warning) << "Unsupported HDR EOTF: "sv << raw_metadata->hdmi_metadata_type1.eotf;
return true;
}
}
/**
 * @brief Copies the connector's HDR mastering metadata into a Sunshine metadata struct.
 * @param metadata Destination structure to fill.
 * @return true if valid HDR metadata was copied, false if the display is SDR or the blob is unreadable.
 */
bool
get_hdr_metadata(SS_HDR_METADATA &metadata) {
  // is_hdr() also validates the blob contents, so bail out early if it fails
  if (!is_hdr()) {
    return false;
  }

  prop_blob_t blob = drmModeGetPropertyBlob(card.fd.el, *hdr_metadata_blob_id);
  if (!blob) {
    BOOST_LOG(error) << "Unable to get HDR metadata blob: "sv << strerror(errno);
    return false;
  }

  auto raw = (hdr_output_metadata *) blob->data;
  const auto &mt1 = raw->hdmi_metadata_type1;

  // Color primaries and white point
  for (int i = 0; i < 3; i++) {
    metadata.displayPrimaries[i].x = mt1.display_primaries[i].x;
    metadata.displayPrimaries[i].y = mt1.display_primaries[i].y;
  }
  metadata.whitePoint.x = mt1.white_point.x;
  metadata.whitePoint.y = mt1.white_point.y;

  // Luminance range and content light levels
  metadata.maxDisplayLuminance = mt1.max_display_mastering_luminance;
  metadata.minDisplayLuminance = mt1.min_display_mastering_luminance;
  metadata.maxContentLightLevel = mt1.max_cll;
  metadata.maxFrameAverageLightLevel = mt1.max_fall;

  return true;
}
// Polls the KMS cursor plane and refreshes the cached cursor state:
// position/size are updated every call; the ARGB pixel data is only
// re-downloaded when the plane's FB or source rectangle changed.
// Permanently disables cursor capture (cursor_plane_id = -1) when the
// required atomic plane properties are missing or the format is unsupported.
void
update_cursor() {
  // Cursor capture is disabled or no cursor plane was ever found
  if (cursor_plane_id < 0) {
    return;
  }

  plane_t plane = drmModeGetPlane(card.fd.el, cursor_plane_id);

  // Atomic plane properties: CRTC_* is where the cursor lands on the CRTC,
  // SRC_* is the portion of the cursor FB that gets displayed
  std::optional<std::int32_t> prop_crtc_x;
  std::optional<std::int32_t> prop_crtc_y;
  std::optional<std::uint32_t> prop_crtc_w;
  std::optional<std::uint32_t> prop_crtc_h;

  std::optional<std::uint64_t> prop_src_x;
  std::optional<std::uint64_t> prop_src_y;
  std::optional<std::uint64_t> prop_src_w;
  std::optional<std::uint64_t> prop_src_h;

  auto props = card.plane_props(cursor_plane_id);
  for (auto &[prop, val] : props) {
    if (prop->name == "CRTC_X"sv) {
      prop_crtc_x = val;
    }
    else if (prop->name == "CRTC_Y"sv) {
      prop_crtc_y = val;
    }
    else if (prop->name == "CRTC_W"sv) {
      prop_crtc_w = val;
    }
    else if (prop->name == "CRTC_H"sv) {
      prop_crtc_h = val;
    }
    else if (prop->name == "SRC_X"sv) {
      prop_src_x = val;
    }
    else if (prop->name == "SRC_Y"sv) {
      prop_src_y = val;
    }
    else if (prop->name == "SRC_W"sv) {
      prop_src_w = val;
    }
    else if (prop->name == "SRC_H"sv) {
      prop_src_h = val;
    }
  }

  // Without atomic mode-setting these properties aren't exposed, so there's
  // no way to track the cursor — give up permanently for this session
  if (!prop_crtc_w || !prop_crtc_h || !prop_crtc_x || !prop_crtc_y) {
    BOOST_LOG(error) << "Cursor plane is missing required plane CRTC properties!"sv;
    BOOST_LOG(error) << "Atomic mode-setting must be enabled to capture the cursor!"sv;
    cursor_plane_id = -1;
    captured_cursor.visible = false;
    return;
  }
  if (!prop_src_x || !prop_src_y || !prop_src_w || !prop_src_h) {
    BOOST_LOG(error) << "Cursor plane is missing required plane SRC properties!"sv;
    BOOST_LOG(error) << "Atomic mode-setting must be enabled to capture the cursor!"sv;
    cursor_plane_id = -1;
    captured_cursor.visible = false;
    return;
  }

  // Update the cursor position and size unconditionally
  captured_cursor.x = *prop_crtc_x;
  captured_cursor.y = *prop_crtc_y;
  captured_cursor.dst_w = *prop_crtc_w;
  captured_cursor.dst_h = *prop_crtc_h;

  // We're technically cheating a bit here by assuming that we can detect
  // changes to the cursor plane via property adjustments. If this isn't
  // true, we'll really have to mmap() the dmabuf and draw that every time.
  bool cursor_dirty = false;

  if (!plane->fb_id) {
    // No FB attached to the plane means the cursor is hidden
    captured_cursor.visible = false;
    captured_cursor.fb_id = 0;
  }
  else if (plane->fb_id != captured_cursor.fb_id) {
    BOOST_LOG(debug) << "Refreshing cursor image after FB changed"sv;
    cursor_dirty = true;
  }
  else if (*prop_src_x != captured_cursor.prop_src_x ||
           *prop_src_y != captured_cursor.prop_src_y ||
           *prop_src_w != captured_cursor.prop_src_w ||
           *prop_src_h != captured_cursor.prop_src_h) {
    BOOST_LOG(debug) << "Refreshing cursor image after source dimensions changed"sv;
    cursor_dirty = true;
  }

  // If the cursor is dirty, map it so we can download the new image
  if (cursor_dirty) {
    auto fb = card.fb(plane.get());
    if (!fb || !fb->handles[0]) {
      // This means the cursor is not currently visible
      captured_cursor.visible = false;
      return;
    }

    // All known cursor planes in the wild are ARGB8888
    if (fb->pixel_format != DRM_FORMAT_ARGB8888) {
      BOOST_LOG(error) << "Unsupported non-ARGB8888 cursor format: "sv << fb->pixel_format;
      captured_cursor.visible = false;
      cursor_plane_id = -1;
      return;
    }

    // All known cursor planes in the wild require linear buffers
    if (fb->modifier != DRM_FORMAT_MOD_LINEAR && fb->modifier != DRM_FORMAT_MOD_INVALID) {
      BOOST_LOG(error) << "Unsupported non-linear cursor modifier: "sv << fb->modifier;
      captured_cursor.visible = false;
      cursor_plane_id = -1;
      return;
    }

    // The SRC_* properties are in Q16.16 fixed point, so convert to integers
    auto src_x = *prop_src_x >> 16;
    auto src_y = *prop_src_y >> 16;
    auto src_w = *prop_src_w >> 16;
    auto src_h = *prop_src_h >> 16;

    // Check for a legal source rectangle
    if (src_x + src_w > fb->width || src_y + src_h > fb->height) {
      BOOST_LOG(error) << "Illegal source size: ["sv << src_x + src_w << ',' << src_y + src_h << "] > ["sv << fb->width << ',' << fb->height << ']';
      captured_cursor.visible = false;
      return;
    }

    // Export the FB handle as a dmabuf fd (closed automatically by file_t)
    file_t plane_fd = card.handleFD(fb->handles[0]);
    if (plane_fd.el < 0) {
      captured_cursor.visible = false;
      return;
    }

    // We will map the entire region, but only copy what the source rectangle specifies
    size_t mapped_size = ((size_t) fb->pitches[0]) * fb->height;
    void *mapped_data = mmap(nullptr, mapped_size, PROT_READ, MAP_SHARED, plane_fd.el, fb->offsets[0]);

    // If we got ENOSYS back, let's try to map it as a dumb buffer instead (required for Nvidia GPUs)
    if (mapped_data == MAP_FAILED && errno == ENOSYS) {
      drm_mode_map_dumb map = {};
      map.handle = fb->handles[0];
      if (drmIoctl(card.fd.el, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0) {
        BOOST_LOG(error) << "Failed to map cursor FB as dumb buffer: "sv << strerror(errno);
        captured_cursor.visible = false;
        return;
      }

      // Dumb buffers are mapped through the card fd at the ioctl-provided offset
      mapped_data = mmap(nullptr, mapped_size, PROT_READ, MAP_SHARED, card.fd.el, map.offset);
    }

    if (mapped_data == MAP_FAILED) {
      BOOST_LOG(error) << "Failed to mmap cursor FB: "sv << strerror(errno);
      captured_cursor.visible = false;
      return;
    }

    captured_cursor.pixels.resize(src_w * src_h * 4);

    // Prepare to read the dmabuf from the CPU
    struct dma_buf_sync sync;
    sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ;
    drmIoctl(plane_fd.el, DMA_BUF_IOCTL_SYNC, &sync);

    // If the image is tightly packed, copy it in one shot
    if (fb->pitches[0] == src_w * 4 && src_x == 0) {
      memcpy(captured_cursor.pixels.data(), &((std::uint8_t *) mapped_data)[src_y * fb->pitches[0]], src_h * fb->pitches[0]);
    }
    else {
      // Copy row by row to deal with mismatched pitch or an X offset
      auto pixel_dst = captured_cursor.pixels.data();
      for (int y = 0; y < src_h; y++) {
        memcpy(&pixel_dst[y * (src_w * 4)], &((std::uint8_t *) mapped_data)[(y + src_y) * fb->pitches[0] + (src_x * 4)], src_w * 4);
      }
    }

    // End the CPU read and unmap the dmabuf
    sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
    drmIoctl(plane_fd.el, DMA_BUF_IOCTL_SYNC, &sync);

    munmap(mapped_data, mapped_size);

    // Commit the refreshed cursor image; bumping the serial lets consumers
    // detect that the pixel data changed
    captured_cursor.visible = true;
    captured_cursor.src_w = src_w;
    captured_cursor.src_h = src_h;
    captured_cursor.prop_src_x = *prop_src_x;
    captured_cursor.prop_src_y = *prop_src_y;
    captured_cursor.prop_src_w = *prop_src_w;
    captured_cursor.prop_src_h = *prop_src_h;
    captured_cursor.fb_id = plane->fb_id;
    ++captured_cursor.serial;
  }
}
// Captures the current framebuffer of the target plane into an EGL surface
// descriptor. `file` receives the dmabuf fds (owning RAII wrappers, one per
// FB handle); `sd->fds` alias the same fds without owning them. Returns
// reinit when HDR metadata or the FB dimensions changed, timeout when the
// FB is temporarily unavailable (e.g. display reconfiguration).
inline capture_e
refresh(file_t *file, egl::surface_descriptor_t *sd, std::optional<std::chrono::steady_clock::time_point> &frame_timestamp) {
  // Check for a change in HDR metadata
  if (connector_id) {
    auto connector_props = card.connector_props(*connector_id);
    if (hdr_metadata_blob_id != card.prop_value_by_name(connector_props, "HDR_OUTPUT_METADATA"sv)) {
      BOOST_LOG(info) << "Reinitializing capture after HDR metadata change"sv;
      return capture_e::reinit;
    }
  }

  plane_t plane = drmModeGetPlane(card.fd.el, plane_id);

  frame_timestamp = std::chrono::steady_clock::now();

  auto fb = card.fb(plane.get());
  if (!fb) {
    // This can happen if the display is being reconfigured while streaming
    BOOST_LOG(warning) << "Couldn't get drm fb for plane ["sv << plane->fb_id << "]: "sv << strerror(errno);
    return capture_e::timeout;
  }

  if (!fb->handles[0]) {
    BOOST_LOG(error) << "Couldn't get handle for DRM Framebuffer ["sv << plane->fb_id << "]: Probably not permitted"sv;
    return capture_e::error;
  }

  // Export each FB plane handle as a dmabuf fd for EGL import
  for (int y = 0; y < 4; ++y) {
    if (!fb->handles[y]) {
      // setting sd->fds[y] to a negative value indicates that sd->offsets[y] and sd->pitches[y]
      // are uninitialized and contain invalid values.
      sd->fds[y] = -1;
      // It's not clear whether there could still be valid handles left.
      // So, continue anyway.
      // TODO: Is this redundant?
      continue;
    }

    file[y] = card.handleFD(fb->handles[y]);
    if (file[y].el < 0) {
      BOOST_LOG(error) << "Couldn't get primary file descriptor for Framebuffer ["sv << fb->fb_id << "]: "sv << strerror(errno);
      return capture_e::error;
    }

    sd->fds[y] = file[y].el;
    sd->offsets[y] = fb->offsets[y];
    sd->pitches[y] = fb->pitches[y];
  }

  sd->width = fb->width;
  sd->height = fb->height;
  sd->modifier = fb->modifier;
  sd->fourcc = fb->pixel_format;

  // A FB size change (e.g. mode switch) requires reinitializing capture
  if (
    fb->width != img_width ||
    fb->height != img_height) {
    return capture_e::reinit;
  }

  update_cursor();

  return capture_e::ok;
}
mem_type_e mem_type;  // Encoder memory type this display was created for

std::chrono::nanoseconds delay;  // Target interval between captured frames (used for pacing in capture())

int img_width, img_height;  // Dimensions of the captured DRM framebuffer
int img_offset_x, img_offset_y;  // Offset of the captured CRTC within the framebuffer

int plane_id;  // DRM plane being captured
int crtc_id;  // CRTC the captured plane is attached to
int crtc_index;  // Index of crtc_id within the card's CRTC list

// Connector driving the captured CRTC (if found) and its HDR_OUTPUT_METADATA
// blob id, used to detect HDR configuration changes mid-stream
std::optional<uint32_t> connector_id;
std::optional<uint64_t> hdr_metadata_blob_id;

int cursor_plane_id;  // Cursor plane for the captured CRTC, or -1 when unavailable/disabled
cursor_t captured_cursor {};  // Cached cursor state snapshotted by update_cursor()

card_t card;  // The DRM card node being captured
};
/**
 * @brief KMS display backend that downloads each captured frame to system RAM.
 *
 * The DRM framebuffer is imported into EGL as a texture, read back with
 * glGetTextureSubImage, and the cursor is alpha-blended in software.
 * This is the fallback (GPU -> RAM -> GPU) path when zero-copy hardware
 * encoding is unavailable.
 */
class display_ram_t: public display_t {
public:
  display_ram_t(mem_type_e mem_type):
    display_t(mem_type) {}

  /**
   * @brief Initializes KMS capture plus the GBM/EGL objects used for readback.
   * @param display_name The monitor index to capture.
   * @param config The stream configuration.
   * @return 0 on success, -1 on failure.
   */
  int
  init(const std::string &display_name, const ::video::config_t &config) {
    if (!gbm::create_device) {
      BOOST_LOG(warning) << "libgbm not initialized"sv;
      return -1;
    }

    if (display_t::init(display_name, config)) {
      return -1;
    }

    gbm.reset(gbm::create_device(card.fd.el));
    if (!gbm) {
      // gbm_create_device() is not an EGL call, so report errno here;
      // eglGetError() would return a stale/meaningless value at this point
      BOOST_LOG(error) << "Couldn't create GBM device: ["sv << strerror(errno) << ']';
      return -1;
    }

    display = egl::make_display(gbm.get());
    if (!display) {
      return -1;
    }

    auto ctx_opt = egl::make_ctx(display.get());
    if (!ctx_opt) {
      return -1;
    }

    ctx = std::move(*ctx_opt);

    return 0;
  }

  /**
   * @brief Main capture loop: paces frames to the configured interval and
   *        pushes captured images to the consumer callback.
   * @return The reason capture stopped.
   */
  capture_e
  capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override {
    auto next_frame = std::chrono::steady_clock::now();

    sleep_overshoot_logger.reset();

    while (true) {
      auto now = std::chrono::steady_clock::now();

      if (next_frame > now) {
        std::this_thread::sleep_for(next_frame - now);
        sleep_overshoot_logger.first_point(next_frame);
        sleep_overshoot_logger.second_point_now_and_log();
      }

      next_frame += delay;
      if (next_frame < now) {  // some major slowdown happened; we couldn't keep up
        next_frame = now + delay;
      }

      std::shared_ptr<platf::img_t> img_out;
      auto status = snapshot(pull_free_image_cb, img_out, 1000ms, *cursor);
      switch (status) {
        case platf::capture_e::reinit:
        case platf::capture_e::error:
        case platf::capture_e::interrupted:
          return status;
        case platf::capture_e::timeout:
          // No new frame; notify the consumer with has_frame = false
          if (!push_captured_image_cb(std::move(img_out), false)) {
            return platf::capture_e::ok;
          }
          break;
        case platf::capture_e::ok:
          if (!push_captured_image_cb(std::move(img_out), true)) {
            return platf::capture_e::ok;
          }
          break;
        default:
          BOOST_LOG(error) << "Unrecognized capture status ["sv << (int) status << ']';
          return status;
      }
    }

    return capture_e::ok;
  }

  /**
   * @brief Creates an avcodec encode device for the RAM copy path.
   * @param pix_fmt The negotiated pixel format.
   * @return A software-upload encode device (or a plain device when no HW backend is built).
   */
  std::unique_ptr<avcodec_encode_device_t>
  make_avcodec_encode_device(pix_fmt_e pix_fmt) override {
#ifdef SUNSHINE_BUILD_VAAPI
    if (mem_type == mem_type_e::vaapi) {
      return va::make_avcodec_encode_device(width, height, false);
    }
#endif

#ifdef SUNSHINE_BUILD_CUDA
    if (mem_type == mem_type_e::cuda) {
      return cuda::make_avcodec_encode_device(width, height, false);
    }
#endif

    return std::make_unique<avcodec_encode_device_t>();
  }

  /**
   * @brief Alpha-blends the cached cursor image into a captured RAM frame.
   * @param img The frame to draw the cursor onto (modified in place).
   */
  void
  blend_cursor(img_t &img) {
    // TODO: Cursor scaling is not supported in this codepath.
    // We always draw the cursor at the source size.
    auto pixels = (int *) img.data;

    int32_t screen_height = img.height;
    int32_t screen_width = img.width;

    // This is the position in the target that we will start drawing the cursor
    auto cursor_x = std::max<int32_t>(0, captured_cursor.x - img_offset_x);
    auto cursor_y = std::max<int32_t>(0, captured_cursor.y - img_offset_y);

    // If the cursor is partially off screen, the coordinates may be negative
    // which means we will draw the top-right visible portion of the cursor only.
    auto cursor_delta_x = cursor_x - std::max<int32_t>(-captured_cursor.src_w, captured_cursor.x - img_offset_x);
    auto cursor_delta_y = cursor_y - std::max<int32_t>(-captured_cursor.src_h, captured_cursor.y - img_offset_y);

    auto delta_height = std::min<uint32_t>(captured_cursor.src_h, std::max<int32_t>(0, screen_height - cursor_y)) - cursor_delta_y;
    auto delta_width = std::min<uint32_t>(captured_cursor.src_w, std::max<int32_t>(0, screen_width - cursor_x)) - cursor_delta_x;
    for (auto y = 0; y < delta_height; ++y) {
      // Offset into the cursor image to skip drawing the parts of the cursor image that are off screen
      //
      // NB: We must access the elements via the data() function because cursor_end may point to the
      // the first element beyond the valid range of the vector. Using vector's [] operator in that
      // manner is undefined behavior (and triggers errors when using debug libc++), while doing the
      // same with an array is fine.
      auto cursor_begin = (uint32_t *) &captured_cursor.pixels.data()[((y + cursor_delta_y) * captured_cursor.src_w + cursor_delta_x) * 4];
      auto cursor_end = (uint32_t *) &captured_cursor.pixels.data()[((y + cursor_delta_y) * captured_cursor.src_w + delta_width + cursor_delta_x) * 4];

      auto pixels_begin = &pixels[(y + cursor_y) * (img.row_pitch / img.pixel_pitch) + cursor_x];

      std::for_each(cursor_begin, cursor_end, [&](uint32_t cursor_pixel) {
        auto colors_in = (uint8_t *) pixels_begin;

        auto alpha = (*(uint *) &cursor_pixel) >> 24u;
        if (alpha == 255) {
          // Fully opaque: overwrite the frame pixel
          *pixels_begin = cursor_pixel;
        }
        else {
          // Blend cursor over the frame pixel (with rounding via +255/2)
          auto colors_out = (uint8_t *) &cursor_pixel;
          colors_in[0] = colors_out[0] + (colors_in[0] * (255 - alpha) + 255 / 2) / 255;
          colors_in[1] = colors_out[1] + (colors_in[1] * (255 - alpha) + 255 / 2) / 255;
          colors_in[2] = colors_out[2] + (colors_in[2] * (255 - alpha) + 255 / 2) / 255;
        }
        ++pixels_begin;
      });
    }
  }

  /**
   * @brief Captures one frame: imports the FB into EGL, downloads it to RAM,
   *        and optionally blends the cursor.
   * @return ok on success; error/timeout/reinit/interrupted otherwise.
   */
  capture_e
  snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor) {
    file_t fb_fd[4];

    egl::surface_descriptor_t sd;

    std::optional<std::chrono::steady_clock::time_point> frame_timestamp;
    auto status = refresh(fb_fd, &sd, frame_timestamp);
    if (status != capture_e::ok) {
      return status;
    }

    auto rgb_opt = egl::import_source(display.get(), sd);

    if (!rgb_opt) {
      return capture_e::error;
    }

    auto &rgb = *rgb_opt;

    gl::ctx.BindTexture(GL_TEXTURE_2D, rgb->tex[0]);

    // Don't remove these lines, see https://github.com/LizardByte/Sunshine/issues/453
    int w, h;
    gl::ctx.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &w);
    gl::ctx.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &h);
    BOOST_LOG(debug) << "width and height: w "sv << w << " h "sv << h;

    if (!pull_free_image_cb(img_out)) {
      return platf::capture_e::interrupted;
    }

    gl::ctx.GetTextureSubImage(rgb->tex[0], 0, img_offset_x, img_offset_y, 0, width, height, 1, GL_BGRA, GL_UNSIGNED_BYTE, img_out->height * img_out->row_pitch, img_out->data);
    img_out->frame_timestamp = frame_timestamp;

    if (cursor && captured_cursor.visible) {
      blend_cursor(*img_out);
    }

    return capture_e::ok;
  }

  /**
   * @brief Allocates a BGRA image sized to the captured viewport.
   */
  std::shared_ptr<img_t>
  alloc_img() override {
    auto img = std::make_shared<kms_img_t>();
    img->width = width;
    img->height = height;
    img->pixel_pitch = 4;
    img->row_pitch = img->pixel_pitch * width;
    img->data = new std::uint8_t[height * img->row_pitch];

    return img;
  }

  int
  dummy_img(platf::img_t *img) override {
    return 0;
  }

  gbm::gbm_t gbm;  // GBM device wrapping the card fd
  egl::display_t display;  // EGL display created from the GBM device
  egl::ctx_t ctx;  // EGL context used for FB import and readback
};
/**
 * @brief KMS display backend that keeps captured frames in VRAM.
 *
 * The DRM framebuffer dmabuf fds are handed to the hardware encoder
 * directly (zero copy); the cursor is attached to the image descriptor
 * as a separate ARGB pixel buffer.
 */
class display_vram_t: public display_t {
public:
  display_vram_t(mem_type_e mem_type):
    display_t(mem_type) {}

  /**
   * @brief Creates the hardware encode device matching this display's memory type.
   * @param pix_fmt The negotiated pixel format.
   * @return The encode device, or nullptr when no matching backend is built.
   */
  std::unique_ptr<avcodec_encode_device_t>
  make_avcodec_encode_device(pix_fmt_e pix_fmt) override {
#ifdef SUNSHINE_BUILD_VAAPI
    if (mem_type == mem_type_e::vaapi) {
      return va::make_avcodec_encode_device(width, height, dup(card.render_fd.el), img_offset_x, img_offset_y, true);
    }
#endif

#ifdef SUNSHINE_BUILD_CUDA
    if (mem_type == mem_type_e::cuda) {
      return cuda::make_avcodec_gl_encode_device(width, height, img_offset_x, img_offset_y);
    }
#endif

    BOOST_LOG(error) << "Unsupported pixel format for egl::display_vram_t: "sv << platf::from_pix_fmt(pix_fmt);
    return nullptr;
  }

  /**
   * @brief Allocates an empty VRAM image descriptor.
   */
  std::shared_ptr<img_t>
  alloc_img() override {
    auto img = std::make_shared<egl::img_descriptor_t>();

    img->width = width;
    img->height = height;
    // Start the cursor serial at a value no real serial can match, so the
    // first snapshot always copies fresh cursor pixel data
    img->serial = std::numeric_limits<decltype(img->serial)>::max();
    img->data = nullptr;
    img->pixel_pitch = 4;

    img->sequence = 0;
    std::fill_n(img->sd.fds, 4, -1);

    return img;
  }

  int
  dummy_img(platf::img_t *img) override {
    // Empty images are recognized as dummies by the zero sequence number
    return 0;
  }

  /**
   * @brief Main capture loop: paces frames to the configured interval and
   *        pushes captured images to the consumer callback.
   * @return The reason capture stopped.
   */
  capture_e
  capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override {
    // NB: `override` added for consistency with display_ram_t::capture;
    // this overrides the same base class virtual.
    auto next_frame = std::chrono::steady_clock::now();

    sleep_overshoot_logger.reset();

    while (true) {
      auto now = std::chrono::steady_clock::now();

      if (next_frame > now) {
        std::this_thread::sleep_for(next_frame - now);
        sleep_overshoot_logger.first_point(next_frame);
        sleep_overshoot_logger.second_point_now_and_log();
      }

      next_frame += delay;
      if (next_frame < now) {  // some major slowdown happened; we couldn't keep up
        next_frame = now + delay;
      }

      std::shared_ptr<platf::img_t> img_out;
      auto status = snapshot(pull_free_image_cb, img_out, 1000ms, *cursor);
      switch (status) {
        case platf::capture_e::reinit:
        case platf::capture_e::error:
        case platf::capture_e::interrupted:
          return status;
        case platf::capture_e::timeout:
          // No new frame; notify the consumer with has_frame = false
          if (!push_captured_image_cb(std::move(img_out), false)) {
            return platf::capture_e::ok;
          }
          break;
        case platf::capture_e::ok:
          if (!push_captured_image_cb(std::move(img_out), true)) {
            return platf::capture_e::ok;
          }
          break;
        default:
          BOOST_LOG(error) << "Unrecognized capture status ["sv << (int) status << ']';
          return status;
      }
    }

    return capture_e::ok;
  }

  /**
   * @brief Captures one frame into a VRAM image descriptor (zero copy).
   * @return ok on success; error/timeout/reinit/interrupted otherwise.
   */
  capture_e
  snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds /* timeout */, bool cursor) {
    file_t fb_fd[4];

    if (!pull_free_image_cb(img_out)) {
      return platf::capture_e::interrupted;
    }
    auto img = (egl::img_descriptor_t *) img_out.get();
    img->reset();

    auto status = refresh(fb_fd, &img->sd, img->frame_timestamp);
    if (status != capture_e::ok) {
      return status;
    }

    img->sequence = ++sequence;

    if (cursor && captured_cursor.visible) {
      // Copy new cursor pixel data if it's been updated
      if (img->serial != captured_cursor.serial) {
        img->buffer = captured_cursor.pixels;
        img->serial = captured_cursor.serial;
      }

      img->x = captured_cursor.x;
      img->y = captured_cursor.y;
      img->src_w = captured_cursor.src_w;
      img->src_h = captured_cursor.src_h;
      img->width = captured_cursor.dst_w;
      img->height = captured_cursor.dst_h;
      img->pixel_pitch = 4;
      img->row_pitch = img->pixel_pitch * img->width;
      img->data = img->buffer.data();
    }
    else {
      img->data = nullptr;
    }

    // img->sd now carries the dmabuf fds; release our RAII wrappers so the
    // fds aren't closed when they go out of scope here
    for (auto x = 0; x < 4; ++x) {
      fb_fd[x].release();
    }
    return capture_e::ok;
  }

  /**
   * @brief Initializes capture and validates hardware encode support.
   * @return 0 on success; -1 to fall back to the GPU -> RAM -> GPU path.
   */
  int
  init(const std::string &display_name, const ::video::config_t &config) {
    if (display_t::init(display_name, config)) {
      return -1;
    }

#ifdef SUNSHINE_BUILD_VAAPI
    if (mem_type == mem_type_e::vaapi && !va::validate(card.render_fd.el)) {
      BOOST_LOG(warning) << "Monitor "sv << display_name << " doesn't support hardware encoding. Reverting back to GPU -> RAM -> GPU"sv;
      return -1;
    }
#endif

#ifndef SUNSHINE_BUILD_CUDA
    if (mem_type == mem_type_e::cuda) {
      BOOST_LOG(warning) << "Attempting to use NVENC without CUDA support. Reverting back to GPU -> RAM -> GPU"sv;
      return -1;
    }
#endif

    return 0;
  }

  // Monotonically increasing capture sequence number; 0 marks a dummy image
  std::uint64_t sequence {};
};
} // namespace kms
/**
 * @brief Creates a KMS display, preferring the zero-copy VRAM path for hardware encoders.
 * @param hwdevice_type The encoder memory type.
 * @param display_name The monitor index to capture.
 * @param config The stream configuration.
 * @return The initialized display, or nullptr on failure.
 */
std::shared_ptr<display_t>
kms_display(mem_type_e hwdevice_type, const std::string &display_name, const ::video::config_t &config) {
  // Hardware encoders can consume the captured frame directly from VRAM
  bool want_vram = hwdevice_type == mem_type_e::vaapi || hwdevice_type == mem_type_e::cuda;
  if (want_vram) {
    auto vram_disp = std::make_shared<kms::display_vram_t>(hwdevice_type);
    if (vram_disp->init(display_name, config) == 0) {
      return vram_disp;
    }
    // In the case of failure, attempt the old method for VAAPI
  }

  auto ram_disp = std::make_shared<kms::display_ram_t>(hwdevice_type);
  if (ram_disp->init(display_name, config) != 0) {
    return nullptr;
  }

  return ram_disp;
}
/**
 * On Wayland, it's not possible to determine the position of the monitor on the desktop with KMS.
 * Wayland does allow applications to query attached monitors on the desktop,
 * however, the naming scheme is not standardized across implementations.
 *
 * As a result, correlating the KMS output to the wayland outputs is guess work at best.
 * But, it's necessary for absolute mouse coordinates to work.
 *
 * This is an ugly hack :(
 */
void
correlate_to_wayland(std::vector<kms::card_descriptor_t> &cds) {
  auto monitors = wl::monitors();

  BOOST_LOG(info) << "-------- Start of KMS monitor list --------"sv;

  for (auto &monitor : monitors) {
    std::string_view name = monitor->name;

    // Wayland output names are usually of the form {type}-{index},
    // where {index} is the n'th occurrence of {type}
    auto dash_pos = name.find_last_of('-');

    std::uint32_t index;
    if (dash_pos == std::string_view::npos) {
      index = 1;
    }
    else {
      index = std::max<int64_t>(1, util::from_view(name.substr(dash_pos + 1)));
    }

    auto type = kms::from_view(name.substr(0, dash_pos));

    // Search the card descriptors for the KMS monitor matching this wayland
    // output and copy its desktop offset over; early return replaces the
    // old goto-based break out of the nested loops.
    auto apply_offsets = [&]() {
      for (auto &card_descriptor : cds) {
        for (auto &[_, monitor_descriptor] : card_descriptor.crtc_to_monitor) {
          if (monitor_descriptor.index != index || monitor_descriptor.type != type) {
            continue;
          }

          monitor_descriptor.viewport.offset_x = monitor->viewport.offset_x;
          monitor_descriptor.viewport.offset_y = monitor->viewport.offset_y;

          // A sanity check, it's guesswork after all.
          if (
            monitor_descriptor.viewport.width != monitor->viewport.width ||
            monitor_descriptor.viewport.height != monitor->viewport.height) {
            BOOST_LOG(warning)
              << "Mismatch on expected Resolution compared to actual resolution: "sv
              << monitor_descriptor.viewport.width << 'x' << monitor_descriptor.viewport.height
              << " vs "sv
              << monitor->viewport.width << 'x' << monitor->viewport.height;
          }

          BOOST_LOG(info) << "Monitor " << monitor_descriptor.monitor_index << " is "sv << name << ": "sv << monitor->description;
          return;
        }
      }
    };
    apply_offsets();

    BOOST_LOG(verbose) << "Reduced to name: "sv << name << ": "sv << index;
  }
  BOOST_LOG(info) << "--------- End of KMS monitor list ---------"sv;
}
// A list of names of displays accepted as display_name
// Enumerates all usable planes on all /dev/dri/card* nodes, builds the
// card descriptors used by display_t::init, deduces the virtual desktop
// size, and (on Wayland) correlates monitors for absolute mouse input.
std::vector<std::string>
kms_display_names(mem_type_e hwdevice_type) {
  int count = 0;

  // kmsgrab requires DRM device nodes to exist
  if (!fs::exists("/dev/dri")) {
    BOOST_LOG(warning) << "Couldn't find /dev/dri, kmsgrab won't be enabled"sv;
    return {};
  }

  if (!gbm::create_device) {
    BOOST_LOG(warning) << "libgbm not initialized"sv;
    return {};
  }

  kms::conn_type_count_t conn_type_count;

  std::vector<kms::card_descriptor_t> cds;
  std::vector<std::string> display_names;

  fs::path card_dir { "/dev/dri"sv };
  for (auto &entry : fs::directory_iterator { card_dir }) {
    auto file = entry.path().filename();
    auto filestring = file.generic_string();
    // Only consider cardN nodes (skip renderD*, by-path, etc.)
    if (std::string_view { filestring }.substr(0, 4) != "card"sv) {
      continue;
    }

    kms::card_t card;
    if (card.init(entry.path().c_str())) {
      continue;
    }

    // Skip non-Nvidia cards if we're looking for CUDA devices
    // unless NVENC is selected manually by the user
    if (hwdevice_type == mem_type_e::cuda && !card.is_nvidia()) {
      BOOST_LOG(debug) << file << " is not a CUDA device"sv;
      if (config::video.encoder == "nvenc") {
        BOOST_LOG(warning) << "Using NVENC with your display connected to a different GPU may not work properly!"sv;
      }
      else {
        continue;
      }
    }

    auto crtc_to_monitor = kms::map_crtc_to_monitor(card.monitors(conn_type_count));

    auto end = std::end(card);
    for (auto plane = std::begin(card); plane != end; ++plane) {
      // Skip unused planes
      if (!plane->fb_id) {
        continue;
      }

      // Cursor planes are handled separately and don't count as monitors
      if (card.is_cursor(plane->plane_id)) {
        continue;
      }

      auto fb = card.fb(plane.get());
      if (!fb) {
        BOOST_LOG(error) << "Couldn't get drm fb for plane ["sv << plane->fb_id << "]: "sv << strerror(errno);
        continue;
      }

      if (!fb->handles[0]) {
        BOOST_LOG(error) << "Couldn't get handle for DRM Framebuffer ["sv << plane->fb_id << "]: Probably not permitted"sv;
        // Missing CAP_SYS_ADMIN is fatal when KMS capture was explicitly
        // requested or X11 isn't available as a fallback; otherwise just log
        BOOST_LOG((window_system != window_system_e::X11 || config::video.capture == "kms") ? fatal : error)
          << "You must run [sudo setcap cap_sys_admin+p $(readlink -f $(which sunshine))] for KMS display capture to work!\n"sv
          << "If you installed from AppImage or Flatpak, please refer to the official documentation:\n"sv
          << "https://docs.lizardbyte.dev/projects/sunshine/en/latest/about/setup.html#install"sv;
        break;
      }

      // This appears to return the offset of the monitor
      auto crtc = card.crtc(plane->crtc_id);
      if (!crtc) {
        BOOST_LOG(error) << "Couldn't get CRTC info: "sv << strerror(errno);
        continue;
      }

      // Record this CRTC's viewport in the card's monitor descriptor
      auto it = crtc_to_monitor.find(plane->crtc_id);
      if (it != std::end(crtc_to_monitor)) {
        it->second.viewport = platf::touch_port_t {
          (int) crtc->x,
          (int) crtc->y,
          (int) crtc->width,
          (int) crtc->height,
        };
        it->second.monitor_index = count;
      }

      kms::env_width = std::max(kms::env_width, (int) (crtc->x + crtc->width));
      kms::env_height = std::max(kms::env_height, (int) (crtc->y + crtc->height));

      kms::print(plane.get(), fb.get(), crtc.get());

      display_names.emplace_back(std::to_string(count++));
    }

    cds.emplace_back(kms::card_descriptor_t {
      std::move(file),
      std::move(crtc_to_monitor),
    });
  }

  // presumably wl::init() returns 0 on success — correlate only then (TODO confirm)
  if (!wl::init()) {
    correlate_to_wayland(cds);
  }

  // Deduce the full virtual desktop size
  kms::env_width = 0;
  kms::env_height = 0;
  for (auto &card_descriptor : cds) {
    for (auto &[_, monitor_descriptor] : card_descriptor.crtc_to_monitor) {
      BOOST_LOG(debug) << "Monitor description"sv;
      BOOST_LOG(debug) << "Resolution: "sv << monitor_descriptor.viewport.width << 'x' << monitor_descriptor.viewport.height;
      BOOST_LOG(debug) << "Offset: "sv << monitor_descriptor.viewport.offset_x << 'x' << monitor_descriptor.viewport.offset_y;

      kms::env_width = std::max(kms::env_width, (int) (monitor_descriptor.viewport.offset_x + monitor_descriptor.viewport.width));
      kms::env_height = std::max(kms::env_height, (int) (monitor_descriptor.viewport.offset_y + monitor_descriptor.viewport.height));
    }
  }

  BOOST_LOG(debug) << "Desktop resolution: "sv << kms::env_width << 'x' << kms::env_height;

  kms::card_descriptors = std::move(cds);

  return display_names;
}
} // namespace platf
| 59,637
|
C++
|
.cpp
| 1,434
| 32.041841
| 180
| 0.564825
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
3,996
|
vaapi.cpp
|
LizardByte_Sunshine/src/platform/linux/vaapi.cpp
|
/**
* @file src/platform/linux/vaapi.cpp
* @brief Definitions for VA-API hardware accelerated capture.
*/
#include <sstream>
#include <string>
#include <fcntl.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/pixdesc.h>

#include <va/va.h>
#include <va/va_drm.h>

#if !VA_CHECK_VERSION(1, 9, 0)
  // vaSyncBuffer stub allows Sunshine built against libva <2.9.0 to link against ffmpeg on libva 2.9.0 or later
  // All parameters are intentionally unused; the stub only reports that the
  // operation is unavailable, letting callers fall back to other sync paths.
  VAStatus
  vaSyncBuffer(
    VADisplay dpy,
    VABufferID buf_id,
    uint64_t timeout_ns) {
    return VA_STATUS_ERROR_UNIMPLEMENTED;
  }
#endif
}
#include "graphics.h"
#include "misc.h"
#include "src/config.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include "src/video.h"
using namespace std::literals;
extern "C" struct AVBufferRef;
namespace va {
// Surface export flags passed to VA-API.
// NOTE(review): values appear to mirror VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
// VA_EXPORT_SURFACE_WRITE_ONLY and VA_EXPORT_SURFACE_SEPARATE_LAYERS from
// upstream va.h — confirm against the libva headers in use.
constexpr auto SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2 = 0x40000000;
constexpr auto EXPORT_SURFACE_WRITE_ONLY = 0x0002;
constexpr auto EXPORT_SURFACE_SEPARATE_LAYERS = 0x0004;

// Local aliases for VA-API handle types
using VADisplay = void *;
using VAStatus = int;
using VAGenericID = unsigned int;
using VASurfaceID = VAGenericID;

// Local mirror of the VA-API DRM PRIME surface descriptor.
// NOTE(review): field order and sizes appear to match upstream
// VADRMPRIMESurfaceDescriptor — do not reorder or resize fields.
struct DRMPRIMESurfaceDescriptor {
  // VA Pixel format fourcc of the whole surface (VA_FOURCC_*).
  uint32_t fourcc;

  uint32_t width;
  uint32_t height;

  // Number of distinct DRM objects making up the surface.
  uint32_t num_objects;

  struct {
    // DRM PRIME file descriptor for this object.
    // Needs to be closed manually
    int fd;

    // Total size of this object (may include regions which are not part of the surface)
    uint32_t size;

    // Format modifier applied to this object, not sure what that means
    uint64_t drm_format_modifier;
  } objects[4];

  // Number of layers making up the surface.
  uint32_t num_layers;
  struct {
    // DRM format fourcc of this layer (DRM_FOURCC_*).
    uint32_t drm_format;

    // Number of planes in this layer.
    uint32_t num_planes;

    // references objects --> DRMPRIMESurfaceDescriptor.objects[object_index[0]]
    uint32_t object_index[4];

    // Offset within the object of each plane.
    uint32_t offset[4];

    // Pitch of each plane.
    uint32_t pitch[4];
  } layers[4];
};

// Owning wrapper around a VADisplay; vaTerminate is invoked on destruction
using display_t = util::safe_ptr_v2<void, VAStatus, vaTerminate>;

// Defined later in this file; stored as the FFmpeg hardware device init
// callback in va_t::init (via the `data` pointer)
int
vaapi_init_avcodec_hardware_input_buffer(platf::avcodec_encode_device_t *encode_device, AVBufferRef **hw_device_buf);
/**
 * @brief Base VA-API encode device backed by a DRM render node plus GBM/EGL context.
 *
 * Owns the render-node fd, the GBM device and the EGL display/context used to
 * convert captured frames into NV12 VA surfaces consumed by the hardware encoder.
 */
class va_t: public platf::avcodec_encode_device_t {
public:
  /**
   * @brief Sets up the GBM device and EGL display/context on the render node.
   * @param in_width Width of the capture source.
   * @param in_height Height of the capture source.
   * @param render_device Open DRM render-node fd; ownership is taken.
   * @return 0 on success, -1 on failure.
   */
  int
  init(int in_width, int in_height, file_t &&render_device) {
    file = std::move(render_device);
    if (!gbm::create_device) {
      BOOST_LOG(warning) << "libgbm not initialized"sv;
      return -1;
    }
    // Consumed later by ffmpeg's hwdevice setup path
    // (see vaapi_init_avcodec_hardware_input_buffer)
    this->data = (void *) vaapi_init_avcodec_hardware_input_buffer;
    gbm.reset(gbm::create_device(file.el));
    if (!gbm) {
      char string[1024];
      BOOST_LOG(error) << "Couldn't create GBM device: ["sv << strerror_r(errno, string, sizeof(string)) << ']';
      return -1;
    }
    display = egl::make_display(gbm.get());
    if (!display) {
      return -1;
    }
    auto ctx_opt = egl::make_ctx(display.get());
    if (!ctx_opt) {
      return -1;
    }
    ctx = std::move(*ctx_opt);
    width = in_width;
    height = in_height;
    return 0;
  }

  /**
   * @brief Finds a supported VA entrypoint for the given VA profile.
   * @param profile The profile to match.
   * @return A valid encoding entrypoint or 0 on failure.
   */
  VAEntrypoint
  select_va_entrypoint(VAProfile profile) {
    std::vector<VAEntrypoint> entrypoints(vaMaxNumEntrypoints(va_display));
    int num_eps;
    auto status = vaQueryConfigEntrypoints(va_display, profile, entrypoints.data(), &num_eps);
    if (status != VA_STATUS_SUCCESS) {
      BOOST_LOG(error) << "Failed to query VA entrypoints: "sv << vaErrorStr(status);
      return (VAEntrypoint) 0;
    }
    entrypoints.resize(num_eps);
    // Sorted in order of descending preference
    VAEntrypoint ep_preferences[] = {
      VAEntrypointEncSliceLP,
      VAEntrypointEncSlice,
      VAEntrypointEncPicture
    };
    for (auto ep_pref : ep_preferences) {
      if (std::find(entrypoints.begin(), entrypoints.end(), ep_pref) != entrypoints.end()) {
        return ep_pref;
      }
    }
    return (VAEntrypoint) 0;
  }

  /**
   * @brief Determines if a given VA profile is supported.
   * @param profile The profile to match.
   * @return Boolean value indicating if the profile is supported.
   */
  bool
  is_va_profile_supported(VAProfile profile) {
    std::vector<VAProfile> profiles(vaMaxNumProfiles(va_display));
    int num_profs;
    auto status = vaQueryConfigProfiles(va_display, profiles.data(), &num_profs);
    if (status != VA_STATUS_SUCCESS) {
      BOOST_LOG(error) << "Failed to query VA profiles: "sv << vaErrorStr(status);
      return false;
    }
    profiles.resize(num_profs);
    return std::find(profiles.begin(), profiles.end(), profile) != profiles.end();
  }

  /**
   * @brief Determines the matching VA profile for the codec configuration.
   * @param ctx The FFmpeg codec context.
   * @return The matching VA profile or `VAProfileNone` on failure.
   */
  VAProfile
  get_va_profile(AVCodecContext *ctx) {
    if (ctx->codec_id == AV_CODEC_ID_H264) {
      // There's no VAAPI profile for H.264 4:4:4
      return VAProfileH264High;
    }
    else if (ctx->codec_id == AV_CODEC_ID_HEVC) {
      switch (ctx->profile) {
        case FF_PROFILE_HEVC_REXT:
          // Range extensions: pick the 4:4:4 profile by bit depth
          switch (av_pix_fmt_desc_get(ctx->sw_pix_fmt)->comp[0].depth) {
            case 10:
              return VAProfileHEVCMain444_10;
            case 8:
              return VAProfileHEVCMain444;
          }
          break;
        case FF_PROFILE_HEVC_MAIN_10:
          return VAProfileHEVCMain10;
        case FF_PROFILE_HEVC_MAIN:
          return VAProfileHEVCMain;
      }
    }
    else if (ctx->codec_id == AV_CODEC_ID_AV1) {
      switch (ctx->profile) {
        case FF_PROFILE_AV1_HIGH:
          return VAProfileAV1Profile1;
        case FF_PROFILE_AV1_MAIN:
          return VAProfileAV1Profile0;
      }
    }
    BOOST_LOG(error) << "Unknown encoder profile: "sv << ctx->profile;
    return VAProfileNone;
  }

  /**
   * @brief Configures encoder options (power mode, slices, rate control) based
   *        on what the VA driver reports for the selected profile/entrypoint.
   * @param ctx The FFmpeg codec context (slices/rc_buffer_size may be adjusted).
   * @param options FFmpeg codec-private options dictionary to populate.
   */
  void
  init_codec_options(AVCodecContext *ctx, AVDictionary **options) override {
    auto va_profile = get_va_profile(ctx);
    if (va_profile == VAProfileNone || !is_va_profile_supported(va_profile)) {
      // Don't bother doing anything if the profile isn't supported
      return;
    }
    auto va_entrypoint = select_va_entrypoint(va_profile);
    if (va_entrypoint == 0) {
      // It's possible that only decoding is supported for this profile
      return;
    }
    auto vendor = vaQueryVendorString(va_display);
    if (va_entrypoint == VAEntrypointEncSliceLP) {
      BOOST_LOG(info) << "Using LP encoding mode"sv;
      av_dict_set_int(options, "low_power", 1, 0);
    }
    else {
      BOOST_LOG(info) << "Using normal encoding mode"sv;
    }
    VAConfigAttrib rc_attr = { VAConfigAttribRateControl };
    auto status = vaGetConfigAttributes(va_display, va_profile, va_entrypoint, &rc_attr, 1);
    if (status != VA_STATUS_SUCCESS) {
      // Stick to the default rate control (CQP)
      rc_attr.value = 0;
    }
    VAConfigAttrib slice_attr = { VAConfigAttribEncMaxSlices };
    status = vaGetConfigAttributes(va_display, va_profile, va_entrypoint, &slice_attr, 1);
    if (status != VA_STATUS_SUCCESS) {
      // Assume only a single slice is supported
      slice_attr.value = 1;
    }
    if (ctx->slices > slice_attr.value) {
      BOOST_LOG(info) << "Limiting slice count to encoder maximum: "sv << slice_attr.value;
      ctx->slices = slice_attr.value;
    }
    // Use VBR with a single frame VBV when the user forces it and for known good cases:
    // - Intel GPUs
    // - AV1
    //
    // VBR ensures the bitstream isn't full of filler data for bitrate undershoots and
    // single frame VBV ensures that we don't have large bitrate overshoots (at least
    // as much as they can be avoided without pre-analysis).
    //
    // When we have to resort to the default 1 second VBV for encoding quality reasons,
    // we stick to CBR in order to avoid encoding huge frames after bitrate undershoots
    // leave headroom available in the RC window.
    if (config::video.vaapi.strict_rc_buffer ||
        (vendor && strstr(vendor, "Intel")) ||
        ctx->codec_id == AV_CODEC_ID_AV1) {
      // One frame worth of bits: bitrate / fps
      ctx->rc_buffer_size = ctx->bit_rate * ctx->framerate.den / ctx->framerate.num;
      if (rc_attr.value & VA_RC_VBR) {
        BOOST_LOG(info) << "Using VBR with single frame VBV size"sv;
        av_dict_set(options, "rc_mode", "VBR", 0);
      }
      else if (rc_attr.value & VA_RC_CBR) {
        BOOST_LOG(info) << "Using CBR with single frame VBV size"sv;
        av_dict_set(options, "rc_mode", "CBR", 0);
      }
      else {
        BOOST_LOG(warning) << "Using CQP with single frame VBV size"sv;
        av_dict_set_int(options, "qp", config::video.qp, 0);
      }
    }
    else if (!(rc_attr.value & (VA_RC_CBR | VA_RC_VBR))) {
      BOOST_LOG(warning) << "Using CQP rate control"sv;
      av_dict_set_int(options, "qp", config::video.qp, 0);
    }
    else {
      BOOST_LOG(info) << "Using default rate control"sv;
    }
  }

  /**
   * @brief Binds an FFmpeg hwframe to this device by exporting its VA surface
   *        as DMA-BUFs and importing them as an EGL NV12 render target.
   * @param frame The FFmpeg frame; ownership is taken via `hwframe`.
   * @param hw_frames_ctx_buf The AVHWFramesContext buffer backing the frame.
   * @return 0 on success, -1 on failure.
   */
  int
  set_frame(AVFrame *frame, AVBufferRef *hw_frames_ctx_buf) override {
    this->hwframe.reset(frame);
    this->frame = frame;
    if (!frame->buf[0]) {
      if (av_hwframe_get_buffer(hw_frames_ctx_buf, frame, 0)) {
        BOOST_LOG(error) << "Couldn't get hwframe for VAAPI"sv;
        return -1;
      }
    }
    va::DRMPRIMESurfaceDescriptor prime;
    // For VAAPI frames, data[3] carries the VASurfaceID
    va::VASurfaceID surface = (std::uintptr_t) frame->data[3];
    auto hw_frames_ctx = (AVHWFramesContext *) hw_frames_ctx_buf->data;
    auto status = vaExportSurfaceHandle(
      this->va_display,
      surface,
      va::SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
      va::EXPORT_SURFACE_WRITE_ONLY | va::EXPORT_SURFACE_SEPARATE_LAYERS,
      &prime);
    if (status) {
      BOOST_LOG(error) << "Couldn't export va surface handle: ["sv << (int) surface << "]: "sv << vaErrorStr(status);
      return -1;
    }
    // Keep track of file descriptors
    std::array<file_t, egl::nv12_img_t::num_fds> fds;
    for (int x = 0; x < prime.num_objects; ++x) {
      fds[x] = prime.objects[x].fd;
    }
    // Separate-layers export of NV12 yields exactly one Y layer and one UV layer
    if (prime.num_layers != 2) {
      BOOST_LOG(error) << "Invalid layer count for VA surface: expected 2, got "sv << prime.num_layers;
      return -1;
    }
    egl::surface_descriptor_t sds[2] = {};
    for (int plane = 0; plane < 2; ++plane) {
      auto &sd = sds[plane];
      auto &layer = prime.layers[plane];
      sd.fourcc = layer.drm_format;
      // UV plane is subsampled
      sd.width = prime.width / (plane == 0 ? 1 : 2);
      sd.height = prime.height / (plane == 0 ? 1 : 2);
      // The modifier must be the same for all planes
      sd.modifier = prime.objects[layer.object_index[0]].drm_format_modifier;
      std::fill_n(sd.fds, 4, -1);
      for (int x = 0; x < layer.num_planes; ++x) {
        sd.fds[x] = prime.objects[layer.object_index[x]].fd;
        sd.pitches[x] = layer.pitch[x];
        sd.offsets[x] = layer.offset[x];
      }
    }
    auto nv12_opt = egl::import_target(display.get(), std::move(fds), sds[0], sds[1]);
    if (!nv12_opt) {
      return -1;
    }
    auto sws_opt = egl::sws_t::make(width, height, frame->width, frame->height, hw_frames_ctx->sw_format);
    if (!sws_opt) {
      return -1;
    }
    this->sws = std::move(*sws_opt);
    this->nv12 = std::move(*nv12_opt);
    return 0;
  }

  // Propagates the negotiated colorspace to the EGL converter.
  void
  apply_colorspace() override {
    sws.apply_colorspace(colorspace);
  }

  // Non-owning VADisplay; the owner is the FFmpeg hwdevice context.
  va::display_t::pointer va_display;

  file_t file;  // DRM render-node fd (owned)
  gbm::gbm_t gbm;
  egl::display_t display;
  egl::ctx_t ctx;

  // This must be destroyed before display_t to ensure the GPU
  // driver is still loaded when vaDestroySurfaces() is called.
  frame_t hwframe;

  egl::sws_t sws;
  egl::nv12_t nv12;

  int width, height;  // Capture source dimensions
};
// Encode device fed from system-RAM captures: uploads the image into the
// EGL converter before converting to NV12.
class va_ram_t: public va_t {
public:
  int
  convert(platf::img_t &img) override {
    sws.load_ram(img);
    sws.convert(nv12->buf);
    return 0;
  }
};
// Encode device fed directly from VRAM captures: imports the capture's
// DMA-BUF as an EGL texture, re-importing only when the source changes.
class va_vram_t: public va_t {
public:
  int
  convert(platf::img_t &img) override {
    auto &descriptor = (egl::img_descriptor_t &) img;
    if (descriptor.sequence == 0) {
      // For dummy images, use a blank RGB texture instead of importing a DMA-BUF
      rgb = egl::create_blank(img);
    }
    else if (descriptor.sequence > sequence) {
      // A newer capture source: drop the old import before creating a new one
      sequence = descriptor.sequence;
      rgb = egl::rgb_t {};
      auto rgb_opt = egl::import_source(display.get(), descriptor.sd);
      if (!rgb_opt) {
        return -1;
      }
      rgb = std::move(*rgb_opt);
    }
    sws.load_vram(descriptor, offset_x, offset_y, rgb->tex[0]);
    sws.convert(nv12->buf);
    return 0;
  }

  /**
   * @brief Initializes the base device and records the capture crop offsets.
   * @return 0 on success, -1 on failure.
   */
  int
  init(int in_width, int in_height, file_t &&render_device, int offset_x, int offset_y) {
    if (va_t::init(in_width, in_height, std::move(render_device))) {
      return -1;
    }
    sequence = 0;
    this->offset_x = offset_x;
    this->offset_y = offset_y;
    return 0;
  }

  // Sequence number of the last imported capture source
  std::uint64_t sequence;
  egl::rgb_t rgb;
  // Offset of the monitor within the combined desktop
  int offset_x, offset_y;
};
/**
* This is a private structure of FFmpeg, I need this to manually create
* a VAAPI hardware context
*
* xdisplay will not be used internally by FFmpeg
*/
/**
 * This is a private structure of FFmpeg, I need this to manually create
 * a VAAPI hardware context
 *
 * xdisplay will not be used internally by FFmpeg
 */
typedef struct VAAPIDevicePriv {
  union {
    void *xdisplay;
    int fd;
  } drm;
  // Duplicated render-node fd; closed in vaapi_hwdevice_ctx_free()
  int drm_fd;
} VAAPIDevicePriv;
/**
* VAAPI connection details.
*
* Allocated as AVHWDeviceContext.hwctx
*/
/**
 * VAAPI connection details.
 *
 * Allocated as AVHWDeviceContext.hwctx
 * NOTE(review): layout must match FFmpeg's own AVVAAPIDeviceContext exactly —
 * keep in sync with libavutil/hwcontext_vaapi.h.
 */
typedef struct AVVAAPIDeviceContext {
  /**
   * The VADisplay handle, to be filled by the user.
   */
  va::VADisplay display;
  /**
   * Driver quirks to apply - this is filled by av_hwdevice_ctx_init(),
   * with reference to a table of known drivers, unless the
   * AV_VAAPI_DRIVER_QUIRK_USER_SET bit is already present. The user
   * may need to refer to this field when performing any later
   * operations using VAAPI with the same VADisplay.
   */
  unsigned int driver_quirks;
} AVVAAPIDeviceContext;
// Bridges libva's C logging callback into BOOST_LOG. `level` is a pointer to
// the severity logger to emit through (e.g. &error or &info).
// NOTE(review): `__log` is a reserved identifier (leading double underscore);
// renaming would require updating the vaSet*Callback call sites below.
static void
__log(void *level, const char *msg) {
  BOOST_LOG(*(boost::log::sources::severity_logger<int> *) level) << msg;
}
// FFmpeg hwdevice free callback: tears down the VADisplay before closing the
// DRM fd it was created from, then releases the private struct.
static void
vaapi_hwdevice_ctx_free(AVHWDeviceContext *ctx) {
  auto hwctx = (AVVAAPIDeviceContext *) ctx->hwctx;
  auto priv = (VAAPIDevicePriv *) ctx->user_opaque;
  vaTerminate(hwctx->display);
  close(priv->drm_fd);
  av_freep(&priv);
}
/**
 * @brief Creates an FFmpeg VAAPI hardware device context from the device's render node.
 * @param base The va_t encode device whose render-node fd is used.
 * @param hw_device_buf Receives the newly allocated AVHWDeviceContext buffer.
 * @return 0 on success, a negative value on failure.
 */
int
vaapi_init_avcodec_hardware_input_buffer(platf::avcodec_encode_device_t *base, AVBufferRef **hw_device_buf) {
  auto va = (va::va_t *) base;

  // Duplicate the fd so the FFmpeg context owns its own copy
  auto fd = dup(va->file.el);

  auto *priv = (VAAPIDevicePriv *) av_mallocz(sizeof(VAAPIDevicePriv));
  if (!priv) {
    close(fd);
    return -1;
  }
  priv->drm_fd = fd;

  auto fg = util::fail_guard([fd, priv]() {
    close(fd);
    av_free(priv);
  });

  va::display_t display { vaGetDisplayDRM(fd) };
  if (!display) {
    auto render_device = config::video.adapter_name.empty() ? "/dev/dri/renderD128" : config::video.adapter_name.c_str();

    BOOST_LOG(error) << "Couldn't open a va display from DRM with device: "sv << render_device;
    return -1;
  }
  va->va_display = display.get();

  // Route libva diagnostics into our logging subsystem.
  // The second registration must be the *info* callback — registering the
  // error callback twice would drop info messages and mislabel severities.
  vaSetErrorCallback(display.get(), __log, &error);
  vaSetInfoCallback(display.get(), __log, &info);

  int major, minor;
  auto status = vaInitialize(display.get(), &major, &minor);
  if (status) {
    BOOST_LOG(error) << "Couldn't initialize va display: "sv << vaErrorStr(status);
    return -1;
  }

  BOOST_LOG(info) << "vaapi vendor: "sv << vaQueryVendorString(display.get());

  *hw_device_buf = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VAAPI);
  if (!*hw_device_buf) {
    BOOST_LOG(error) << "Couldn't allocate FFmpeg hardware device context"sv;
    return -1;
  }
  auto ctx = (AVHWDeviceContext *) (*hw_device_buf)->data;
  auto hwctx = (AVVAAPIDeviceContext *) ctx->hwctx;

  // Ownership of the VADisplay and DRM fd is now ours to manage via the free() function
  hwctx->display = display.release();
  ctx->user_opaque = priv;
  ctx->free = vaapi_hwdevice_ctx_free;
  fg.disable();

  auto err = av_hwdevice_ctx_init(*hw_device_buf);
  if (err) {
    char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
    BOOST_LOG(error) << "Failed to create FFMpeg hardware device context: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
    return err;
  }

  return 0;
}
/**
 * @brief Checks whether the display offers an encode entrypoint for a profile.
 * @param display The initialized VADisplay to probe.
 * @param profile The VA profile of interest.
 * @return `true` when slice-based encoding (normal or low-power) is available.
 */
static bool
query(display_t::pointer display, VAProfile profile) {
  std::vector<VAEntrypoint> eps(vaMaxNumEntrypoints(display));

  int filled;
  auto status = vaQueryConfigEntrypoints(display, profile, eps.data(), &filled);
  if (status) {
    BOOST_LOG(error) << "Couldn't query entrypoints: "sv << vaErrorStr(status);
    return false;
  }
  eps.resize(filled);

  // Either the normal or the low-power slice encoder counts as encode support
  return std::any_of(eps.begin(), eps.end(), [](VAEntrypoint ep) {
    return ep == VAEntrypointEncSlice || ep == VAEntrypointEncSliceLP;
  });
}
/**
 * @brief Verifies that a DRM render node supports the encode profiles Sunshine needs.
 * @param fd Open render-node fd (borrowed, not consumed).
 * @return `true` if H.264 (and, when enabled, HEVC 8/10-bit) encoding is available.
 */
bool
validate(int fd) {
  va::display_t display { vaGetDisplayDRM(fd) };
  if (!display) {
    char string[1024];
    // Resolve the device path behind the fd purely for the error message.
    // NOTE(review): readlink() may return -1 on failure, which would produce
    // a bogus string_view length here — confirm the fd always resolves.
    auto bytes = readlink(("/proc/self/fd/" + std::to_string(fd)).c_str(), string, sizeof(string));
    std::string_view render_device { string, (std::size_t) bytes };

    BOOST_LOG(error) << "Couldn't open a va display from DRM with device: "sv << render_device;
    return false;
  }
  int major, minor;
  auto status = vaInitialize(display.get(), &major, &minor);
  if (status) {
    BOOST_LOG(error) << "Couldn't initialize va display: "sv << vaErrorStr(status);
    return false;
  }
  // H.264 encode support is mandatory
  if (!query(display.get(), VAProfileH264Main)) {
    return false;
  }
  // HEVC profiles are only required when the corresponding mode is active
  if (video::active_hevc_mode > 1 && !query(display.get(), VAProfileHEVCMain)) {
    return false;
  }
  if (video::active_hevc_mode > 2 && !query(display.get(), VAProfileHEVCMain10)) {
    return false;
  }
  return true;
}
/**
 * @brief Creates a VA-API encode device on the given render node.
 * @param card Open render-node fd; ownership is transferred to the device.
 * @param vram When true, captures are imported directly from VRAM;
 *             otherwise frames are uploaded from system RAM.
 * @return The initialized device, or nullptr on failure.
 */
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(int width, int height, file_t &&card, int offset_x, int offset_y, bool vram) {
  if (vram) {
    auto device = std::make_unique<va::va_vram_t>();
    if (device->init(width, height, std::move(card), offset_x, offset_y) == 0) {
      return device;
    }
    return nullptr;
  }

  auto device = std::make_unique<va::va_ram_t>();
  if (device->init(width, height, std::move(card)) == 0) {
    return device;
  }
  return nullptr;
}
/**
 * @brief Creates a VA-API encode device, opening the configured render node.
 * @return The initialized device, or nullptr if the node can't be opened.
 */
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(int width, int height, int offset_x, int offset_y, bool vram) {
  // Fall back to the first render node when no adapter is configured
  auto render_device = config::video.adapter_name.empty() ? "/dev/dri/renderD128" : config::video.adapter_name.c_str();

  file_t file = open(render_device, O_RDWR);
  if (file.el < 0) {
    char string[1024];
    BOOST_LOG(error) << "Couldn't open "sv << render_device << ": " << strerror_r(errno, string, sizeof(string));
    return nullptr;
  }

  return make_avcodec_encode_device(width, height, std::move(file), offset_x, offset_y, vram);
}
// Convenience overload: no capture crop offset.
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(int width, int height, bool vram) {
  return make_avcodec_encode_device(width, height, 0, 0, vram);
}
} // namespace va
| 19,933
|
C++
|
.cpp
| 544
| 30.137868
| 144
| 0.627238
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
3,997
|
audio.cpp
|
LizardByte_Sunshine/src/platform/linux/audio.cpp
|
/**
* @file src/platform/linux/audio.cpp
* @brief Definitions for audio control on Linux.
*/
#include <bitset>
#include <sstream>
#include <thread>
#include <boost/regex.hpp>
#include <pulse/error.h>
#include <pulse/pulseaudio.h>
#include <pulse/simple.h>
#include "src/platform/common.h"
#include "src/config.h"
#include "src/logging.h"
#include "src/thread_safe.h"
namespace platf {
using namespace std::literals;
// Maps Sunshine's speaker indices to pulseaudio channel positions.
// NOTE(review): ordering presumably matches speaker::map_* — confirm before reordering.
constexpr pa_channel_position_t position_mapping[] {
  PA_CHANNEL_POSITION_FRONT_LEFT,
  PA_CHANNEL_POSITION_FRONT_RIGHT,
  PA_CHANNEL_POSITION_FRONT_CENTER,
  PA_CHANNEL_POSITION_LFE,
  PA_CHANNEL_POSITION_REAR_LEFT,
  PA_CHANNEL_POSITION_REAR_RIGHT,
  PA_CHANNEL_POSITION_SIDE_LEFT,
  PA_CHANNEL_POSITION_SIDE_RIGHT,
};
/**
 * @brief Builds the module-null-sink argument string for a virtual sink.
 * @param name Sink name (also used as the device description).
 * @param mapping Speaker indices into position_mapping.
 * @param channels Number of channels in the mapping.
 * @return The pulseaudio module argument string.
 */
std::string
to_string(const char *name, const std::uint8_t *mapping, int channels) {
  std::stringstream out;

  out << "rate=48000 sink_name="sv << name
      << " format=float channels="sv << channels
      << " channel_map="sv;

  // Comma-separated channel position list
  for (int i = 0; i < channels; ++i) {
    if (i > 0) {
      out << ',';
    }
    out << pa_channel_position_to_string(position_mapping[mapping[i]]);
  }

  out << " sink_properties=device.description="sv << name;

  auto result = out.str();
  BOOST_LOG(debug) << "null-sink args: "sv << result;
  return result;
}
// Microphone backed by pulseaudio's blocking "simple" API, reading from a
// sink's monitor source.
struct mic_attr_t: public mic_t {
  util::safe_ptr<pa_simple, pa_simple_free> mic;

  /**
   * @brief Fills the buffer with captured float samples (blocking).
   * @param sample_buf Destination; its current size determines the read length.
   * @return `capture_e::ok` on success, `capture_e::error` on read failure.
   */
  capture_e
  sample(std::vector<float> &sample_buf) override {
    auto sample_size = sample_buf.size();
    auto buf = sample_buf.data();
    int status;
    if (pa_simple_read(mic.get(), buf, sample_size * sizeof(float), &status)) {
      BOOST_LOG(error) << "pa_simple_read() failed: "sv << pa_strerror(status);
      return capture_e::error;
    }
    return capture_e::ok;
  }
};
/**
 * @brief Opens a pulseaudio record stream on the given source.
 * @param mapping Speaker indices into position_mapping.
 * @param channels Channel count.
 * @param sample_rate Sample rate in Hz.
 * @param frame_size Samples per channel per fragment (sets the fragment size).
 * @param source_name Pulseaudio source (typically a sink monitor) to record from.
 * @return The microphone, or nullptr if the stream couldn't be created.
 */
std::unique_ptr<mic_t>
microphone(const std::uint8_t *mapping, int channels, std::uint32_t sample_rate, std::uint32_t frame_size, std::string source_name) {
  auto mic = std::make_unique<mic_attr_t>();

  pa_sample_spec ss { PA_SAMPLE_FLOAT32, sample_rate, (std::uint8_t) channels };
  pa_channel_map pa_map;
  pa_map.channels = channels;
  std::for_each_n(pa_map.map, pa_map.channels, [mapping](auto &channel) mutable {
    channel = position_mapping[*mapping++];
  });

  // -1 leaves pulseaudio defaults; only the capture fragment size is pinned
  pa_buffer_attr pa_attr = {
    .maxlength = uint32_t(-1),
    .tlength = uint32_t(-1),
    .prebuf = uint32_t(-1),
    .minreq = uint32_t(-1),
    .fragsize = uint32_t(frame_size * channels * sizeof(float))
  };

  int status;
  mic->mic.reset(
    pa_simple_new(nullptr, "sunshine",
      pa_stream_direction_t::PA_STREAM_RECORD, source_name.c_str(),
      "sunshine-record", &ss, &pa_map, &pa_attr, &status));

  if (!mic->mic) {
    auto err_str = pa_strerror(status);
    BOOST_LOG(error) << "pa_simple_new() failed: "sv << err_str;
    return nullptr;
  }

  return mic;
}
namespace pa {
// Maps T to a pointer-to-const view of T's pointee:
//   T*  ->  const T*        (pointer types lose their top-level pointer first)
//   T   ->  const T*
// Used to hand pulseaudio callback payloads to user code as read-only.
template <class T>
using add_const_t = std::conditional_t<
  std::is_pointer_v<T>,
  const std::remove_pointer_t<T> *,
  const T *>;
// Deleter shim so util::safe_ptr can release pa_xmalloc'd memory.
template <class T>
void
pa_free(T *p) {
  pa_xfree(p);
}

// RAII wrappers around the pulseaudio async-API handle types
using ctx_t = util::safe_ptr<pa_context, pa_context_unref>;
using loop_t = util::safe_ptr<pa_mainloop, pa_mainloop_free>;
using op_t = util::safe_ptr<pa_operation, pa_operation_unref>;
using string_t = util::safe_ptr<char, pa_free<char>>;
// C-to-C++ trampoline for pulseaudio callbacks without an eol parameter
// (e.g. pa_context_get_server_info). `userdata` points at the std::function.
template <class T>
using cb_simple_t = std::function<void(ctx_t::pointer, add_const_t<T> i)>;
template <class T>
void
cb(ctx_t::pointer ctx, add_const_t<T> i, void *userdata) {
  auto &f = *(cb_simple_t<T> *) userdata;

  // Cannot similarly filter on eol here. Unless reported otherwise assume
  // we have no need for special filtering like cb?
  f(ctx, i);
}
// C-to-C++ trampoline for pulseaudio list callbacks that signal end-of-list
// via `eol` (e.g. pa_context_get_sink_info_list).
template <class T>
using cb_t = std::function<void(ctx_t::pointer, add_const_t<T> i, int eol)>;
template <class T>
void
cb(ctx_t::pointer ctx, add_const_t<T> i, int eol, void *userdata) {
  auto &f = *(cb_t<T> *) userdata;

  // For some reason, pulseaudio calls this callback after disconnecting
  if (i && eol) {
    return;
  }

  f(ctx, i, eol);
}
// Index callback (pa_context_index_cb_t): rings the alarm with the returned
// index (PA_INVALID_INDEX on failure).
void
cb_i(ctx_t::pointer ctx, std::uint32_t i, void *userdata) {
  auto alarm = (safe::alarm_raw_t<int> *) userdata;
  alarm->ring(i);
}
// Context state callback: forwards to the std::function stored in userdata.
void
ctx_state_cb(ctx_t::pointer ctx, void *userdata) {
  auto &f = *(std::function<void(ctx_t::pointer)> *) userdata;
  f(ctx);
}
// Success callback (pa_context_success_cb_t): pulseaudio passes nonzero on
// success, so ring 0 for success and 1 for failure.
void
success_cb(ctx_t::pointer ctx, int status, void *userdata) {
  assert(userdata != nullptr);

  auto alarm = (safe::alarm_raw_t<int> *) userdata;
  alarm->ring(status ? 0 : 1);
}
/**
 * @brief PulseAudio-backed audio control implementation.
 *
 * Runs a pulseaudio mainloop on a dedicated worker thread and manages the
 * lifetime of the virtual (null) sinks Sunshine creates for each layout.
 */
class server_t: public audio_control_t {
  // Context life-cycle events raised by the pulseaudio state callback
  enum ctx_event_e : int {
    ready,  ///< Context connected and usable
    terminated,  ///< Context shut down cleanly
    failed  ///< Connection failed or was lost
  };

public:
  loop_t loop;
  ctx_t ctx;

  // Last sink explicitly requested via set_sink(); used as a fallback in microphone()
  std::string requested_sink;

  // Module indexes of the null sinks we loaded (PA_INVALID_INDEX when absent)
  struct {
    std::uint32_t stereo = PA_INVALID_INDEX;
    std::uint32_t surround51 = PA_INVALID_INDEX;
    std::uint32_t surround71 = PA_INVALID_INDEX;
  } index;

  std::unique_ptr<safe::event_t<ctx_event_e>> events;
  std::unique_ptr<std::function<void(ctx_t::pointer)>> events_cb;

  std::thread worker;

  /**
   * @brief Connects to pulseaudio and starts the mainloop worker thread.
   * @return 0 on success, -1 on failure.
   */
  int
  init() {
    events = std::make_unique<safe::event_t<ctx_event_e>>();
    loop.reset(pa_mainloop_new());
    ctx.reset(pa_context_new(pa_mainloop_get_api(loop.get()), "sunshine"));
    events_cb = std::make_unique<std::function<void(ctx_t::pointer)>>([this](ctx_t::pointer ctx) {
      switch (pa_context_get_state(ctx)) {
        case PA_CONTEXT_READY:
          events->raise(ready);
          break;
        case PA_CONTEXT_TERMINATED:
          BOOST_LOG(debug) << "Pulseaudio context terminated"sv;
          events->raise(terminated);
          break;
        case PA_CONTEXT_FAILED:
          BOOST_LOG(debug) << "Pulseaudio context failed"sv;
          events->raise(failed);
          break;
        case PA_CONTEXT_CONNECTING:
          BOOST_LOG(debug) << "Connecting to pulseaudio"sv;
          // Intentional fallthrough: transitional states require no action
        case PA_CONTEXT_UNCONNECTED:
        case PA_CONTEXT_AUTHORIZING:
        case PA_CONTEXT_SETTING_NAME:
          break;
      }
    });

    pa_context_set_state_callback(ctx.get(), ctx_state_cb, events_cb.get());

    auto status = pa_context_connect(ctx.get(), nullptr, PA_CONTEXT_NOFLAGS, nullptr);
    if (status) {
      BOOST_LOG(error) << "Couldn't connect to pulseaudio: "sv << pa_strerror(status);
      return -1;
    }

    worker = std::thread {
      [](loop_t::pointer loop) {
        int retval;
        auto status = pa_mainloop_run(loop, &retval);
        if (status < 0) {
          BOOST_LOG(error) << "Couldn't run pulseaudio main loop"sv;
          return;
        }
      },
      loop.get()
    };

    // Block until the context becomes ready (or fails)
    auto event = events->pop();
    if (event == failed) {
      return -1;
    }
    return 0;
  }

  /**
   * @brief Loads a module-null-sink with the given name and channel layout.
   * @return The new module index, or PA_INVALID_INDEX on failure.
   */
  int
  load_null(const char *name, const std::uint8_t *channel_mapping, int channels) {
    auto alarm = safe::make_alarm<int>();

    op_t op {
      pa_context_load_module(
        ctx.get(),
        "module-null-sink",
        to_string(name, channel_mapping, channels).c_str(),
        cb_i,
        alarm.get()),
    };

    alarm->wait();
    return *alarm->status();
  }

  /**
   * @brief Unloads a previously loaded module; a no-op for PA_INVALID_INDEX.
   * @return 0 on success, -1 on failure.
   */
  int
  unload_null(std::uint32_t i) {
    if (i == PA_INVALID_INDEX) {
      return 0;
    }

    auto alarm = safe::make_alarm<int>();
    op_t op {
      pa_context_unload_module(ctx.get(), i, success_cb, alarm.get())
    };

    alarm->wait();
    if (*alarm->status()) {
      BOOST_LOG(error) << "Couldn't unload null-sink with index ["sv << i << "]: "sv << pa_strerror(pa_context_errno(ctx.get()));
      return -1;
    }
    return 0;
  }

  /**
   * @brief Enumerates sinks, creating Sunshine's virtual sinks when missing.
   * @return Sink information, or std::nullopt on failure.
   */
  std::optional<sink_t>
  sink_info() override {
    constexpr auto stereo = "sink-sunshine-stereo";
    constexpr auto surround51 = "sink-sunshine-surround51";
    constexpr auto surround71 = "sink-sunshine-surround71";

    auto alarm = safe::make_alarm<int>();
    sink_t sink;

    // Count of all virtual sinks that are created by us
    int nullcount = 0;
    cb_t<pa_sink_info *> f = [&](ctx_t::pointer ctx, const pa_sink_info *sink_info, int eol) {
      if (!sink_info) {
        if (!eol) {
          BOOST_LOG(error) << "Couldn't get pulseaudio sink info: "sv << pa_strerror(pa_context_errno(ctx));
          alarm->ring(-1);

          // Return immediately so the error status isn't overwritten below
          return;
        }
        alarm->ring(0);
        return;
      }

      // Ensure Sunshine won't create a sink that already exists.
      if (!std::strcmp(sink_info->name, stereo)) {
        index.stereo = sink_info->owner_module;
        ++nullcount;
      }
      else if (!std::strcmp(sink_info->name, surround51)) {
        index.surround51 = sink_info->owner_module;
        ++nullcount;
      }
      else if (!std::strcmp(sink_info->name, surround71)) {
        index.surround71 = sink_info->owner_module;
        ++nullcount;
      }
    };

    op_t op { pa_context_get_sink_info_list(ctx.get(), cb<pa_sink_info *>, &f) };
    if (!op) {
      BOOST_LOG(error) << "Couldn't create card info operation: "sv << pa_strerror(pa_context_errno(ctx.get()));
      return std::nullopt;
    }

    alarm->wait();
    if (*alarm->status()) {
      return std::nullopt;
    }

    auto sink_name = get_default_sink_name();
    sink.host = sink_name;

    // Create any of our virtual sinks that weren't found above
    if (index.stereo == PA_INVALID_INDEX) {
      index.stereo = load_null(stereo, speaker::map_stereo, sizeof(speaker::map_stereo));
      if (index.stereo == PA_INVALID_INDEX) {
        BOOST_LOG(warning) << "Couldn't create virtual sink for stereo: "sv << pa_strerror(pa_context_errno(ctx.get()));
      }
      else {
        ++nullcount;
      }
    }

    if (index.surround51 == PA_INVALID_INDEX) {
      index.surround51 = load_null(surround51, speaker::map_surround51, sizeof(speaker::map_surround51));
      if (index.surround51 == PA_INVALID_INDEX) {
        BOOST_LOG(warning) << "Couldn't create virtual sink for surround-51: "sv << pa_strerror(pa_context_errno(ctx.get()));
      }
      else {
        ++nullcount;
      }
    }

    if (index.surround71 == PA_INVALID_INDEX) {
      index.surround71 = load_null(surround71, speaker::map_surround71, sizeof(speaker::map_surround71));
      if (index.surround71 == PA_INVALID_INDEX) {
        BOOST_LOG(warning) << "Couldn't create virtual sink for surround-71: "sv << pa_strerror(pa_context_errno(ctx.get()));
      }
      else {
        ++nullcount;
      }
    }

    if (sink_name.empty()) {
      BOOST_LOG(warning) << "Couldn't find an active default sink. Continuing with virtual audio only."sv;
    }

    // Only advertise the null sinks if all three layouts are available
    if (nullcount == 3) {
      sink.null = std::make_optional(sink_t::null_t { stereo, surround51, surround71 });
    }

    return std::make_optional(std::move(sink));
  }

  /**
   * @brief Queries the server for the default sink name.
   * @return The default sink name, or an empty string on failure.
   */
  std::string
  get_default_sink_name() {
    std::string sink_name;
    auto alarm = safe::make_alarm<int>();

    cb_simple_t<pa_server_info *> server_f = [&](ctx_t::pointer ctx, const pa_server_info *server_info) {
      if (!server_info) {
        BOOST_LOG(error) << "Couldn't get pulseaudio server info: "sv << pa_strerror(pa_context_errno(ctx));
        alarm->ring(-1);

        // server_info is null; dereferencing it below would crash
        return;
      }
      if (server_info->default_sink_name) {
        sink_name = server_info->default_sink_name;
      }
      alarm->ring(0);
    };

    op_t server_op { pa_context_get_server_info(ctx.get(), cb<pa_server_info *>, &server_f) };
    if (!server_op) {
      // Without a pending operation the callback would never ring the alarm
      BOOST_LOG(error) << "Couldn't create server info operation: "sv << pa_strerror(pa_context_errno(ctx.get()));
      return sink_name;
    }

    alarm->wait();

    // No need to check status. If it failed just return default name.
    return sink_name;
  }

  /**
   * @brief Resolves a sink's monitor source name.
   * @return The monitor source name, or an empty string on failure.
   */
  std::string
  get_monitor_name(const std::string &sink_name) {
    std::string monitor_name;
    auto alarm = safe::make_alarm<int>();

    if (sink_name.empty()) {
      return monitor_name;
    }

    cb_t<pa_sink_info *> sink_f = [&](ctx_t::pointer ctx, const pa_sink_info *sink_info, int eol) {
      if (!sink_info) {
        if (!eol) {
          BOOST_LOG(error) << "Couldn't get pulseaudio sink info for ["sv << sink_name
                           << "]: "sv << pa_strerror(pa_context_errno(ctx));
          alarm->ring(-1);
          return;
        }
        alarm->ring(0);
        return;
      }
      monitor_name = sink_info->monitor_source_name;
    };

    op_t sink_op { pa_context_get_sink_info_by_name(ctx.get(), sink_name.c_str(), cb<pa_sink_info *>, &sink_f) };
    if (!sink_op) {
      // Without a pending operation the callback would never ring the alarm
      BOOST_LOG(error) << "Couldn't create sink info operation: "sv << pa_strerror(pa_context_errno(ctx.get()));
      return monitor_name;
    }

    alarm->wait();

    // No need to check status. If it failed just return default name.
    BOOST_LOG(info) << "Found default monitor by name: "sv << monitor_name;
    return monitor_name;
  }

  /**
   * @brief Opens a microphone recording from the active sink's monitor source.
   */
  std::unique_ptr<mic_t>
  microphone(const std::uint8_t *mapping, int channels, std::uint32_t sample_rate, std::uint32_t frame_size) override {
    // Sink choice priority:
    // 1. Config sink
    // 2. Last sink swapped to (Usually virtual in this case)
    // 3. Default Sink
    // An attempt was made to always use default to match the switching mechanic,
    // but this happens right after the swap so the default returned by PA was not
    // the new one just set!
    auto sink_name = config::audio.sink;
    if (sink_name.empty()) sink_name = requested_sink;
    if (sink_name.empty()) sink_name = get_default_sink_name();
    return ::platf::microphone(mapping, channels, sample_rate, frame_size, get_monitor_name(sink_name));
  }

  /**
   * @brief Makes the given sink the pulseaudio default.
   * @return 0 on success, -1 on failure.
   */
  int
  set_sink(const std::string &sink) override {
    auto alarm = safe::make_alarm<int>();

    BOOST_LOG(info) << "Setting default sink to: ["sv << sink << "]"sv;
    op_t op {
      pa_context_set_default_sink(
        ctx.get(), sink.c_str(), success_cb, alarm.get()),
    };

    if (!op) {
      BOOST_LOG(error) << "Couldn't create set default-sink operation: "sv << pa_strerror(pa_context_errno(ctx.get()));
      return -1;
    }

    alarm->wait();
    if (*alarm->status()) {
      BOOST_LOG(error) << "Couldn't set default-sink ["sv << sink << "]: "sv << pa_strerror(pa_context_errno(ctx.get()));
      return -1;
    }
    requested_sink = sink;
    return 0;
  }

  // Unloads our null sinks, then disconnects and joins the mainloop thread.
  ~server_t() override {
    unload_null(index.stereo);
    unload_null(index.surround51);
    unload_null(index.surround71);

    if (worker.joinable()) {
      pa_context_disconnect(ctx.get());

      // Drain events until the context reports termination or failure
      KITTY_WHILE_LOOP(auto event = events->pop(), event != terminated && event != failed, {
        event = events->pop();
      })

      pa_mainloop_quit(loop.get(), 0);
      worker.join();
    }
  }
};
} // namespace pa
/**
 * @brief Creates the platform audio controller (pulseaudio-backed).
 * @return The controller, or nullptr if pulseaudio couldn't be reached.
 */
std::unique_ptr<audio_control_t>
audio_control() {
  auto audio = std::make_unique<pa::server_t>();

  if (audio->init()) {
    return nullptr;
  }

  return audio;
}
} // namespace platf
| 15,903
|
C++
|
.cpp
| 419
| 29.379475
| 135
| 0.5726
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
3,998
|
wayland.cpp
|
LizardByte_Sunshine/src/platform/linux/wayland.cpp
|
/**
* @file src/platform/linux/wayland.cpp
* @brief Definitions for Wayland capture.
*/
#include <poll.h>
#include <wayland-client.h>
#include <wayland-util.h>
#include <cstdlib>
#include "graphics.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/round_robin.h"
#include "src/utility.h"
#include "wayland.h"
extern const wl_interface wl_output_interface;
using namespace std::literals;
// Disable warning for converting incompatible functions
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
#pragma GCC diagnostic ignored "-Wpmf-conversions"
namespace wl {
// Helper to call C++ method from wayland C callback
// Helper to call C++ method from wayland C callback: `data` is the object
// pointer registered with the listener; the member function m is invoked on it.
template <class T, class Method, Method m, class... Params>
static auto
classCall(void *data, Params... params) -> decltype(((*reinterpret_cast<T *>(data)).*m)(params...)) {
  return ((*reinterpret_cast<T *>(data)).*m)(params...);
}

// Expands to a C-compatible function pointer that dispatches to c::m
#define CLASS_CALL(c, m) classCall<c, decltype(&c::m), &c::m>
/**
 * @brief Connects to a Wayland display.
 * @param display_name Display to connect to; falls back to $WAYLAND_DISPLAY when null.
 * @return 0 on success, -1 on failure.
 */
int
display_t::init(const char *display_name) {
  if (!display_name) {
    display_name = std::getenv("WAYLAND_DISPLAY");
  }

  if (!display_name) {
    BOOST_LOG(error) << "Environment variable WAYLAND_DISPLAY has not been defined"sv;
    return -1;
  }

  display_internal.reset(wl_display_connect(display_name));
  if (!display_internal) {
    BOOST_LOG(error) << "Couldn't connect to Wayland display: "sv << display_name;
    return -1;
  }

  BOOST_LOG(info) << "Found display ["sv << display_name << ']';
  return 0;
}
// Blocks until the compositor has processed all pending requests.
void
display_t::roundtrip() {
  wl_display_roundtrip(display_internal.get());
}
/**
* @brief Waits up to the specified timeout to dispatch new events on the wl_display.
* @param timeout The timeout in milliseconds.
* @return `true` if new events were dispatched or `false` if the timeout expired.
*/
bool
display_t::dispatch(std::chrono::milliseconds timeout) {
  // Check if any events are queued already. If not, flush
  // outgoing events, and prepare to wait for readability.
  if (wl_display_prepare_read(display_internal.get()) == 0) {
    wl_display_flush(display_internal.get());

    // Wait for an event to come in
    struct pollfd pfd = {};
    pfd.fd = wl_display_get_fd(display_internal.get());
    pfd.events = POLLIN;
    if (poll(&pfd, 1, timeout.count()) == 1 && (pfd.revents & POLLIN)) {
      // Read the new event(s)
      wl_display_read_events(display_internal.get());
    }
    else {
      // We timed out, so unlock the queue now
      // (also taken when poll() errors out, e.g. on EINTR)
      wl_display_cancel_read(display_internal.get());
      return false;
    }
  }

  // Dispatch any existing or new pending events
  wl_display_dispatch_pending(display_internal.get());
  return true;
}
wl_registry *
display_t::registry() {
return wl_display_get_registry(display_internal.get());
}
inline monitor_t::monitor_t(wl_output *output):
output { output },
wl_listener {
&CLASS_CALL(monitor_t, wl_geometry),
&CLASS_CALL(monitor_t, wl_mode),
&CLASS_CALL(monitor_t, wl_done),
&CLASS_CALL(monitor_t, wl_scale),
},
xdg_listener {
&CLASS_CALL(monitor_t, xdg_position),
&CLASS_CALL(monitor_t, xdg_size),
&CLASS_CALL(monitor_t, xdg_done),
&CLASS_CALL(monitor_t, xdg_name),
&CLASS_CALL(monitor_t, xdg_description)
} {}
inline void
monitor_t::xdg_name(zxdg_output_v1 *, const char *name) {
this->name = name;
BOOST_LOG(info) << "Name: "sv << this->name;
}
void
monitor_t::xdg_description(zxdg_output_v1 *, const char *description) {
this->description = description;
BOOST_LOG(info) << "Found monitor: "sv << this->description;
}
void
monitor_t::xdg_position(zxdg_output_v1 *, std::int32_t x, std::int32_t y) {
viewport.offset_x = x;
viewport.offset_y = y;
BOOST_LOG(info) << "Offset: "sv << x << 'x' << y;
}
void
monitor_t::xdg_size(zxdg_output_v1 *, std::int32_t width, std::int32_t height) {
BOOST_LOG(info) << "Logical size: "sv << width << 'x' << height;
}
void
monitor_t::wl_mode(wl_output *wl_output, std::uint32_t flags,
std::int32_t width, std::int32_t height, std::int32_t refresh) {
viewport.width = width;
viewport.height = height;
BOOST_LOG(info) << "Resolution: "sv << width << 'x' << height;
}
void
monitor_t::listen(zxdg_output_manager_v1 *output_manager) {
auto xdg_output = zxdg_output_manager_v1_get_xdg_output(output_manager, output);
zxdg_output_v1_add_listener(xdg_output, &xdg_listener, this);
wl_output_add_listener(output, &wl_listener, this);
}
interface_t::interface_t() noexcept
:
output_manager { nullptr },
listener {
&CLASS_CALL(interface_t, add_interface),
&CLASS_CALL(interface_t, del_interface)
} {}
void
interface_t::listen(wl_registry *registry) {
wl_registry_add_listener(registry, &listener, this);
}
void
interface_t::add_interface(wl_registry *registry, std::uint32_t id, const char *interface, std::uint32_t version) {
BOOST_LOG(debug) << "Available interface: "sv << interface << '(' << id << ") version "sv << version;
if (!std::strcmp(interface, wl_output_interface.name)) {
BOOST_LOG(info) << "Found interface: "sv << interface << '(' << id << ") version "sv << version;
monitors.emplace_back(
std::make_unique<monitor_t>(
(wl_output *) wl_registry_bind(registry, id, &wl_output_interface, 2)));
}
else if (!std::strcmp(interface, zxdg_output_manager_v1_interface.name)) {
BOOST_LOG(info) << "Found interface: "sv << interface << '(' << id << ") version "sv << version;
output_manager = (zxdg_output_manager_v1 *) wl_registry_bind(registry, id, &zxdg_output_manager_v1_interface, version);
this->interface[XDG_OUTPUT] = true;
}
else if (!std::strcmp(interface, zwlr_export_dmabuf_manager_v1_interface.name)) {
BOOST_LOG(info) << "Found interface: "sv << interface << '(' << id << ") version "sv << version;
dmabuf_manager = (zwlr_export_dmabuf_manager_v1 *) wl_registry_bind(registry, id, &zwlr_export_dmabuf_manager_v1_interface, version);
this->interface[WLR_EXPORT_DMABUF] = true;
}
}
void
interface_t::del_interface(wl_registry *registry, uint32_t id) {
BOOST_LOG(info) << "Delete: "sv << id;
}
dmabuf_t::dmabuf_t():
status { READY }, frames {}, current_frame { &frames[0] }, listener {
&CLASS_CALL(dmabuf_t, frame),
&CLASS_CALL(dmabuf_t, object),
&CLASS_CALL(dmabuf_t, ready),
&CLASS_CALL(dmabuf_t, cancel)
} {
}
void
dmabuf_t::listen(zwlr_export_dmabuf_manager_v1 *dmabuf_manager, wl_output *output, bool blend_cursor) {
auto frame = zwlr_export_dmabuf_manager_v1_capture_output(dmabuf_manager, blend_cursor, output);
zwlr_export_dmabuf_frame_v1_add_listener(frame, &listener, this);
status = WAITING;
}
dmabuf_t::~dmabuf_t() {
for (auto &frame : frames) {
frame.destroy();
}
}
void
dmabuf_t::frame(
zwlr_export_dmabuf_frame_v1 *frame,
std::uint32_t width, std::uint32_t height,
std::uint32_t x, std::uint32_t y,
std::uint32_t buffer_flags, std::uint32_t flags,
std::uint32_t format,
std::uint32_t high, std::uint32_t low,
std::uint32_t obj_count) {
auto next_frame = get_next_frame();
next_frame->sd.fourcc = format;
next_frame->sd.width = width;
next_frame->sd.height = height;
next_frame->sd.modifier = (((std::uint64_t) high) << 32) | low;
}
void
dmabuf_t::object(
zwlr_export_dmabuf_frame_v1 *frame,
std::uint32_t index,
std::int32_t fd,
std::uint32_t size,
std::uint32_t offset,
std::uint32_t stride,
std::uint32_t plane_index) {
auto next_frame = get_next_frame();
next_frame->sd.fds[plane_index] = fd;
next_frame->sd.pitches[plane_index] = stride;
next_frame->sd.offsets[plane_index] = offset;
}
void
dmabuf_t::ready(
zwlr_export_dmabuf_frame_v1 *frame,
std::uint32_t tv_sec_hi, std::uint32_t tv_sec_lo, std::uint32_t tv_nsec) {
zwlr_export_dmabuf_frame_v1_destroy(frame);
current_frame->destroy();
current_frame = get_next_frame();
status = READY;
}
void
dmabuf_t::cancel(
zwlr_export_dmabuf_frame_v1 *frame,
std::uint32_t reason) {
zwlr_export_dmabuf_frame_v1_destroy(frame);
auto next_frame = get_next_frame();
next_frame->destroy();
status = REINIT;
}
void
frame_t::destroy() {
for (auto x = 0; x < 4; ++x) {
if (sd.fds[x] >= 0) {
close(sd.fds[x]);
sd.fds[x] = -1;
}
}
}
frame_t::frame_t() {
// File descriptors aren't open
std::fill_n(sd.fds, 4, -1);
};
std::vector<std::unique_ptr<monitor_t>>
monitors(const char *display_name) {
display_t display;
if (display.init(display_name)) {
return {};
}
interface_t interface;
interface.listen(display.registry());
display.roundtrip();
if (!interface[interface_t::XDG_OUTPUT]) {
BOOST_LOG(error) << "Missing Wayland wire XDG_OUTPUT"sv;
return {};
}
for (auto &monitor : interface.monitors) {
monitor->listen(interface.output_manager);
}
display.roundtrip();
return std::move(interface.monitors);
}
static bool
validate() {
display_t display;
return display.init() == 0;
}
int
init() {
static bool validated = validate();
return !validated;
}
} // namespace wl
#pragma GCC diagnostic pop
| 9,594
|
C++
|
.cpp
| 275
| 29.934545
| 139
| 0.64552
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
3,999
|
cuda.cpp
|
LizardByte_Sunshine/src/platform/linux/cuda.cpp
|
/**
* @file src/platform/linux/cuda.cpp
* @brief Definitions for CUDA encoding.
*/
#include <bitset>
#include <fcntl.h>
#include <filesystem>
#include <thread>
#include <NvFBC.h>
#include <ffnvcodec/dynlink_loader.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext_cuda.h>
#include <libavutil/imgutils.h>
}
#include "cuda.h"
#include "graphics.h"
#include "src/logging.h"
#include "src/utility.h"
#include "src/video.h"
#include "wayland.h"
#define SUNSHINE_STRINGVIEW_HELPER(x) x##sv
#define SUNSHINE_STRINGVIEW(x) SUNSHINE_STRINGVIEW_HELPER(x)
#define CU_CHECK(x, y) \
if (check((x), SUNSHINE_STRINGVIEW(y ": "))) return -1
#define CU_CHECK_IGNORE(x, y) \
check((x), SUNSHINE_STRINGVIEW(y ": "))
namespace fs = std::filesystem;
using namespace std::literals;
namespace cuda {
constexpr auto cudaDevAttrMaxThreadsPerBlock = (CUdevice_attribute) 1;
constexpr auto cudaDevAttrMaxThreadsPerMultiProcessor = (CUdevice_attribute) 39;
// Log an error where the caller already resolved the CUDA error name/description.
void
pass_error(const std::string_view &sv, const char *name, const char *description) {
BOOST_LOG(error) << sv << name << ':' << description;
}
// Deleter for the dynamically loaded CUDA function table.
void
cff(CudaFunctions *cf) {
cuda_free_functions(&cf);
}
// Process-wide CUDA driver function table, loaded lazily by init().
using cdf_t = util::safe_ptr<CudaFunctions, cff>;
static cdf_t cdf;
// Translate a CUresult into a log message prefixed by `sv`.
// Returns 0 on CUDA_SUCCESS, -1 otherwise (used by the CU_CHECK macros).
inline static int
check(CUresult result, const std::string_view &sv) {
if (result != CUDA_SUCCESS) {
const char *name;
const char *description;
cdf->cuGetErrorName(result, &name);
cdf->cuGetErrorString(result, &description);
BOOST_LOG(error) << sv << name << ':' << description;
return -1;
}
return 0;
}
// Deleter for stream_t: destroy a CUDA stream, logging (but ignoring) failure.
void
freeStream(CUstream stream) {
CU_CHECK_IGNORE(cdf->cuStreamDestroy(stream), "Couldn't destroy cuda stream");
}
// Deleter for registered_resource_t: unregister a CUDA graphics resource.
void
unregisterResource(CUgraphicsResource resource) {
CU_CHECK_IGNORE(cdf->cuGraphicsUnregisterResource(resource), "Couldn't unregister resource");
}
using registered_resource_t = util::safe_ptr<CUgraphicsResource_st, unregisterResource>;
// Captured image backed by a CUDA texture (used by the NvFBC capture path).
class img_t: public platf::img_t {
public:
// CUDA texture holding the captured pixels
tex_t tex;
};
// Load the CUDA driver API and initialize it.
// Returns 0 on success, -1 when the library can't be loaded or cuInit fails.
int
init() {
auto status = cuda_load_functions(&cdf, nullptr);
if (status) {
BOOST_LOG(error) << "Couldn't load cuda: "sv << status;
return -1;
}
CU_CHECK(cdf->cuInit(0), "Couldn't initialize cuda");
return 0;
}
// Encoding device whose color conversion runs in CUDA (via sws_t).
// Base class for the system-RAM upload and VRAM zero-copy variants below.
class cuda_t: public platf::avcodec_encode_device_t {
public:
// Record the capture dimensions; fails when the CUDA driver wasn't loaded.
int
init(int in_width, int in_height) {
if (!cdf) {
BOOST_LOG(warning) << "cuda not initialized"sv;
return -1;
}
// Must be non-zero so the video core treats this as a hardware device
data = (void *) 0x1;
width = in_width;
height = in_height;
return 0;
}
// Bind the destination CUDA frame, create the conversion stream and the
// CUDA scaler. Only AV_PIX_FMT_NV12 targets are supported.
int
set_frame(AVFrame *frame, AVBufferRef *hw_frames_ctx) override {
this->hwframe.reset(frame);
this->frame = frame;
auto hwframe_ctx = (AVHWFramesContext *) hw_frames_ctx->data;
if (hwframe_ctx->sw_format != AV_PIX_FMT_NV12) {
BOOST_LOG(error) << "cuda::cuda_t doesn't support any format other than AV_PIX_FMT_NV12"sv;
return -1;
}
if (!frame->buf[0]) {
if (av_hwframe_get_buffer(hw_frames_ctx, frame, 0)) {
BOOST_LOG(error) << "Couldn't get hwframe for NVENC"sv;
return -1;
}
}
auto cuda_ctx = (AVCUDADeviceContext *) hwframe_ctx->device_ctx->hwctx;
stream = make_stream();
if (!stream) {
return -1;
}
// Conversion work is submitted on our own stream
cuda_ctx->stream = stream.get();
auto sws_opt = sws_t::make(width, height, frame->width, frame->height, width * 4);
if (!sws_opt) {
return -1;
}
sws = std::move(*sws_opt);
linear_interpolation = width != frame->width || height != frame->height;
return 0;
}
// Update the scaler's colorspace and paint the frame background black.
void
apply_colorspace() override {
sws.apply_colorspace(colorspace);
auto tex = tex_t::make(height, width * 4);
if (!tex) {
return;
}
// The default green color is ugly.
// Update the background color
platf::img_t img;
img.width = width;
img.height = height;
img.pixel_pitch = 4;
img.row_pitch = img.width * img.pixel_pitch;
std::vector<std::uint8_t> image_data;
image_data.resize(img.row_pitch * img.height);
img.data = image_data.data();
if (sws.load_ram(img, tex->array)) {
return;
}
sws.convert(frame->data[0], frame->data[1], frame->linesize[0], frame->linesize[1], tex->texture.linear, stream.get(), { frame->width, frame->height, 0, 0 });
}
// Pick the linear- or point-sampled texture view depending on whether scaling occurs.
cudaTextureObject_t
tex_obj(const tex_t &tex) const {
return linear_interpolation ? tex.texture.linear : tex.texture.point;
}
stream_t stream;
frame_t hwframe;
int width, height;
// When height and width don't change, it's not necessary to use linear interpolation
bool linear_interpolation;
sws_t sws;
};
class cuda_ram_t: public cuda_t {
public:
int
convert(platf::img_t &img) override {
return sws.load_ram(img, tex.array) || sws.convert(frame->data[0], frame->data[1], frame->linesize[0], frame->linesize[1], tex_obj(tex), stream.get());
}
int
set_frame(AVFrame *frame, AVBufferRef *hw_frames_ctx) {
if (cuda_t::set_frame(frame, hw_frames_ctx)) {
return -1;
}
auto tex_opt = tex_t::make(height, width * 4);
if (!tex_opt) {
return -1;
}
tex = std::move(*tex_opt);
return 0;
}
tex_t tex;
};
// Zero-copy variant: the capture already lives in a CUDA texture (img_t::tex),
// so conversion reads it directly with no host upload.
class cuda_vram_t: public cuda_t {
public:
// Convert straight from the capture texture into the destination frame.
int
convert(platf::img_t &img) override {
return sws.convert(frame->data[0], frame->data[1], frame->linesize[0], frame->linesize[1], tex_obj(((img_t *) &img)->tex), stream.get());
}
};
/**
 * @brief Opens the DRM device associated with the CUDA device index.
 * @param index CUDA device index to open.
 * @return File descriptor or -1 on failure.
 */
file_t
open_drm_fd_for_cuda_device(int index) {
  CUdevice device;
  CU_CHECK(cdf->cuDeviceGet(&device, index), "Couldn't get CUDA device");

  // There's no way to directly go from CUDA to a DRM device, so we'll
  // use sysfs to look up the DRM device name from the PCI ID.
  std::array<char, 13> pci_bus_id;
  CU_CHECK(cdf->cuDeviceGetPCIBusId(pci_bus_id.data(), pci_bus_id.size(), device), "Couldn't get CUDA device PCI bus ID");
  BOOST_LOG(debug) << "Found CUDA device with PCI bus ID: "sv << pci_bus_id.data();

  // Linux uses lowercase hexadecimal while CUDA uses uppercase.
  // The lambda takes unsigned char: std::tolower has undefined behavior for
  // negative char values other than EOF.
  std::transform(pci_bus_id.begin(), pci_bus_id.end(), pci_bus_id.begin(),
    [](unsigned char c) { return static_cast<char>(std::tolower(c)); });

  // Look for the name of the primary node in sysfs
  try {
    char sysfs_path[PATH_MAX];
    std::snprintf(sysfs_path, sizeof(sysfs_path), "/sys/bus/pci/devices/%s/drm", pci_bus_id.data());
    fs::path sysfs_dir { sysfs_path };
    for (auto &entry : fs::directory_iterator { sysfs_dir }) {
      auto file = entry.path().filename();
      auto filestring = file.generic_string();
      // Only the primary ("cardN") node is usable for our purposes
      if (std::string_view { filestring }.substr(0, 4) != "card"sv) {
        continue;
      }

      BOOST_LOG(debug) << "Found DRM primary node: "sv << filestring;

      fs::path dri_path { "/dev/dri"sv };
      auto device_path = dri_path / file;
      return open(device_path.c_str(), O_RDWR);
    }
  }
  catch (const std::filesystem::filesystem_error &err) {
    BOOST_LOG(error) << "Failed to read sysfs: "sv << err.what();
  }

  BOOST_LOG(error) << "Unable to find DRM device with PCI bus ID: "sv << pci_bus_id.data();
  return -1;
}
// Encoding device that imports captured dmabufs into GL, performs color
// conversion there, then copies the resulting planes into a CUDA frame.
class gl_cuda_vram_t: public platf::avcodec_encode_device_t {
public:
  /**
   * @brief Initialize the GL->CUDA encoding device.
   * @param in_width Width of captured frames.
   * @param in_height Height of captured frames.
   * @param offset_x Offset of content in captured frame.
   * @param offset_y Offset of content in captured frame.
   * @return 0 on success or -1 on failure.
   */
  int
  init(int in_width, int in_height, int offset_x, int offset_y) {
    // This must be non-zero to tell the video core that it's a hardware encoding device.
    data = (void *) 0x1;

    // TODO: Support more than one CUDA device
    // (std::move removed: the call already yields an rvalue)
    file = open_drm_fd_for_cuda_device(0);
    if (file.el < 0) {
      char string[1024];
      BOOST_LOG(error) << "Couldn't open DRM FD for CUDA device: "sv << strerror_r(errno, string, sizeof(string));
      return -1;
    }

    gbm.reset(gbm::create_device(file.el));
    if (!gbm) {
      BOOST_LOG(error) << "Couldn't create GBM device: ["sv << util::hex(eglGetError()).to_string_view() << ']';
      return -1;
    }

    display = egl::make_display(gbm.get());
    if (!display) {
      return -1;
    }

    auto ctx_opt = egl::make_ctx(display.get());
    if (!ctx_opt) {
      return -1;
    }

    ctx = std::move(*ctx_opt);

    width = in_width;
    height = in_height;

    sequence = 0;

    this->offset_x = offset_x;
    this->offset_y = offset_y;

    return 0;
  }

  /**
   * @brief Initialize color conversion into target CUDA frame.
   * @param frame Destination CUDA frame to write into.
   * @param hw_frames_ctx_buf FFmpeg hardware frame context.
   * @return 0 on success or -1 on failure.
   */
  int
  set_frame(AVFrame *frame, AVBufferRef *hw_frames_ctx_buf) override {
    this->hwframe.reset(frame);
    this->frame = frame;

    if (!frame->buf[0]) {
      if (av_hwframe_get_buffer(hw_frames_ctx_buf, frame, 0)) {
        BOOST_LOG(error) << "Couldn't get hwframe for VAAPI"sv;
        return -1;
      }
    }

    auto hw_frames_ctx = (AVHWFramesContext *) hw_frames_ctx_buf->data;
    sw_format = hw_frames_ctx->sw_format;

    // GL render target matching the encoder's software pixel format
    auto nv12_opt = egl::create_target(frame->width, frame->height, sw_format);
    if (!nv12_opt) {
      return -1;
    }

    auto sws_opt = egl::sws_t::make(width, height, frame->width, frame->height, sw_format);
    if (!sws_opt) {
      return -1;
    }

    this->sws = std::move(*sws_opt);
    this->nv12 = std::move(*nv12_opt);

    auto cuda_ctx = (AVCUDADeviceContext *) hw_frames_ctx->device_ctx->hwctx;
    stream = make_stream();
    if (!stream) {
      return -1;
    }

    cuda_ctx->stream = stream.get();

    // Make the GL plane textures visible to CUDA for the copy in convert()
    CU_CHECK(cdf->cuGraphicsGLRegisterImage(&y_res, nv12->tex[0], GL_TEXTURE_2D, CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY),
      "Couldn't register Y plane texture");
    CU_CHECK(cdf->cuGraphicsGLRegisterImage(&uv_res, nv12->tex[1], GL_TEXTURE_2D, CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY),
      "Couldn't register UV plane texture");

    return 0;
  }

  /**
   * @brief Convert the captured image into the target CUDA frame.
   * @param img Captured screen image.
   * @return 0 on success or -1 on failure.
   */
  int
  convert(platf::img_t &img) override {
    auto &descriptor = (egl::img_descriptor_t &) img;

    if (descriptor.sequence == 0) {
      // For dummy images, use a blank RGB texture instead of importing a DMA-BUF
      rgb = egl::create_blank(img);
    }
    else if (descriptor.sequence > sequence) {
      sequence = descriptor.sequence;

      // Release the previous import before creating the new one
      rgb = egl::rgb_t {};

      auto rgb_opt = egl::import_source(display.get(), descriptor.sd);
      if (!rgb_opt) {
        return -1;
      }

      rgb = std::move(*rgb_opt);
    }

    // Perform the color conversion and scaling in GL
    sws.load_vram(descriptor, offset_x, offset_y, rgb->tex[0]);
    sws.convert(nv12->buf);

    auto fmt_desc = av_pix_fmt_desc_get(sw_format);

    // Map the GL textures to read for CUDA
    CUgraphicsResource resources[2] = { y_res.get(), uv_res.get() };
    CU_CHECK(cdf->cuGraphicsMapResources(2, resources, stream.get()), "Couldn't map GL textures in CUDA");

    // Copy from the GL textures to the target CUDA frame
    for (int i = 0; i < 2; i++) {
      CUDA_MEMCPY2D cpy = {};
      cpy.srcMemoryType = CU_MEMORYTYPE_ARRAY;
      CU_CHECK(cdf->cuGraphicsSubResourceGetMappedArray(&cpy.srcArray, resources[i], 0, 0), "Couldn't get mapped plane array");
      cpy.dstMemoryType = CU_MEMORYTYPE_DEVICE;
      cpy.dstDevice = (CUdeviceptr) frame->data[i];
      cpy.dstPitch = frame->linesize[i];
      // Chroma planes are subsampled according to the pixel format descriptor
      cpy.WidthInBytes = (frame->width * fmt_desc->comp[i].step) >> (i ? fmt_desc->log2_chroma_w : 0);
      cpy.Height = frame->height >> (i ? fmt_desc->log2_chroma_h : 0);
      CU_CHECK_IGNORE(cdf->cuMemcpy2DAsync(&cpy, stream.get()), "Couldn't copy texture to CUDA frame");
    }

    // Unmap the textures to allow modification from GL again
    CU_CHECK(cdf->cuGraphicsUnmapResources(2, resources, stream.get()), "Couldn't unmap GL textures from CUDA");

    return 0;
  }

  /**
   * @brief Configures shader parameters for the specified colorspace.
   */
  void
  apply_colorspace() override {
    sws.apply_colorspace(colorspace);
  }

  file_t file;
  gbm::gbm_t gbm;
  egl::display_t display;
  egl::ctx_t ctx;

  // This must be destroyed before display_t
  stream_t stream;
  frame_t hwframe;
  egl::sws_t sws;
  egl::nv12_t nv12;
  AVPixelFormat sw_format;

  int width, height;

  // Sequence number of the most recently imported dmabuf
  std::uint64_t sequence;
  egl::rgb_t rgb;

  registered_resource_t y_res;
  registered_resource_t uv_res;

  int offset_x, offset_y;
};
/**
 * @brief Create a CUDA encoding device.
 * @param width Width of captured frames.
 * @param height Height of captured frames.
 * @param vram true to consume frames already resident in VRAM, false to upload from system RAM.
 * @return The encoding device, or nullptr when CUDA is unavailable or setup fails.
 */
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(int width, int height, bool vram) {
  if (init()) {
    return nullptr;
  }

  // Pick the zero-copy (VRAM) or system-memory upload variant
  std::unique_ptr<cuda_t> device = vram ?
                                     std::unique_ptr<cuda_t> { std::make_unique<cuda_vram_t>() } :
                                     std::unique_ptr<cuda_t> { std::make_unique<cuda_ram_t>() };

  if (device->init(width, height) != 0) {
    return nullptr;
  }

  return device;
}
/**
 * @brief Create a GL->CUDA encoding device for consuming captured dmabufs.
 * @param width Width of captured frames.
 * @param height Height of captured frames.
 * @param offset_x Offset of content in captured frame.
 * @param offset_y Offset of content in captured frame.
 * @return FFmpeg encoding device context, or nullptr on failure.
 */
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_gl_encode_device(int width, int height, int offset_x, int offset_y) {
  // CUDA must be usable before constructing the device
  if (init()) {
    return nullptr;
  }

  auto device = std::make_unique<gl_cuda_vram_t>();
  if (device->init(width, height, offset_x, offset_y) != 0) {
    return nullptr;
  }

  return device;
}
namespace nvfbc {
static PNVFBCCREATEINSTANCE createInstance {};
static NVFBC_API_FUNCTION_LIST func { NVFBC_VERSION };
// Convert a C++ bool to the NvFBC API's boolean enum.
static constexpr inline NVFBC_BOOL
nv_bool(bool b) {
return b ? NVFBC_TRUE : NVFBC_FALSE;
}
static void *handle { nullptr };
int
init() {
static bool funcs_loaded = false;
if (funcs_loaded) return 0;
if (!handle) {
handle = dyn::handle({ "libnvidia-fbc.so.1", "libnvidia-fbc.so" });
if (!handle) {
return -1;
}
}
std::vector<std::tuple<dyn::apiproc *, const char *>> funcs {
{ (dyn::apiproc *) &createInstance, "NvFBCCreateInstance" },
};
if (dyn::load(handle, funcs)) {
dlclose(handle);
handle = nullptr;
return -1;
}
auto status = cuda::nvfbc::createInstance(&cuda::nvfbc::func);
if (status) {
BOOST_LOG(error) << "Unable to create NvFBC instance"sv;
dlclose(handle);
handle = nullptr;
return -1;
}
funcs_loaded = true;
return 0;
}
// RAII guard that binds an NvFBC session to the current thread for its
// lifetime and releases it on destruction (NvFBC calls are thread-bound).
class ctx_t {
public:
ctx_t(NVFBC_SESSION_HANDLE handle) {
NVFBC_BIND_CONTEXT_PARAMS params { NVFBC_BIND_CONTEXT_PARAMS_VER };
if (func.nvFBCBindContext(handle, &params)) {
BOOST_LOG(error) << "Couldn't bind NvFBC context to current thread: " << func.nvFBCGetLastErrorStr(handle);
}
this->handle = handle;
}
~ctx_t() {
NVFBC_RELEASE_CONTEXT_PARAMS params { NVFBC_RELEASE_CONTEXT_PARAMS_VER };
if (func.nvFBCReleaseContext(handle, &params)) {
BOOST_LOG(error) << "Couldn't release NvFBC context from current thread: " << func.nvFBCGetLastErrorStr(handle);
}
}
NVFBC_SESSION_HANDLE handle;
};
class handle_t {
enum flag_e {
SESSION_HANDLE,
SESSION_CAPTURE,
MAX_FLAGS,
};
public:
handle_t() = default;
handle_t(handle_t &&other):
handle_flags { other.handle_flags }, handle { other.handle } {
other.handle_flags.reset();
}
handle_t &
operator=(handle_t &&other) {
std::swap(handle_flags, other.handle_flags);
std::swap(handle, other.handle);
return *this;
}
static std::optional<handle_t>
make() {
NVFBC_CREATE_HANDLE_PARAMS params { NVFBC_CREATE_HANDLE_PARAMS_VER };
// Set privateData to allow NvFBC on consumer NVIDIA GPUs.
// Based on https://github.com/keylase/nvidia-patch/blob/3193b4b1cea91527bf09ea9b8db5aade6a3f3c0a/win/nvfbcwrp/nvfbcwrp_main.cpp#L23-L25 .
const unsigned int MAGIC_PRIVATE_DATA[4] = { 0xAEF57AC5, 0x401D1A39, 0x1B856BBE, 0x9ED0CEBA };
params.privateData = MAGIC_PRIVATE_DATA;
params.privateDataSize = sizeof(MAGIC_PRIVATE_DATA);
handle_t handle;
auto status = func.nvFBCCreateHandle(&handle.handle, ¶ms);
if (status) {
BOOST_LOG(error) << "Failed to create session: "sv << handle.last_error();
return std::nullopt;
}
handle.handle_flags[SESSION_HANDLE] = true;
return handle;
}
const char *
last_error() {
return func.nvFBCGetLastErrorStr(handle);
}
std::optional<NVFBC_GET_STATUS_PARAMS>
status() {
NVFBC_GET_STATUS_PARAMS params { NVFBC_GET_STATUS_PARAMS_VER };
auto status = func.nvFBCGetStatus(handle, ¶ms);
if (status) {
BOOST_LOG(error) << "Failed to get NvFBC status: "sv << last_error();
return std::nullopt;
}
return params;
}
int
capture(NVFBC_CREATE_CAPTURE_SESSION_PARAMS &capture_params) {
if (func.nvFBCCreateCaptureSession(handle, &capture_params)) {
BOOST_LOG(error) << "Failed to start capture session: "sv << last_error();
return -1;
}
handle_flags[SESSION_CAPTURE] = true;
NVFBC_TOCUDA_SETUP_PARAMS setup_params {
NVFBC_TOCUDA_SETUP_PARAMS_VER,
NVFBC_BUFFER_FORMAT_BGRA,
};
if (func.nvFBCToCudaSetUp(handle, &setup_params)) {
BOOST_LOG(error) << "Failed to setup cuda interop with nvFBC: "sv << last_error();
return -1;
}
return 0;
}
int
stop() {
if (!handle_flags[SESSION_CAPTURE]) {
return 0;
}
NVFBC_DESTROY_CAPTURE_SESSION_PARAMS params { NVFBC_DESTROY_CAPTURE_SESSION_PARAMS_VER };
if (func.nvFBCDestroyCaptureSession(handle, ¶ms)) {
BOOST_LOG(error) << "Couldn't destroy capture session: "sv << last_error();
return -1;
}
handle_flags[SESSION_CAPTURE] = false;
return 0;
}
int
reset() {
if (!handle_flags[SESSION_HANDLE]) {
return 0;
}
stop();
NVFBC_DESTROY_HANDLE_PARAMS params { NVFBC_DESTROY_HANDLE_PARAMS_VER };
ctx_t ctx { handle };
if (func.nvFBCDestroyHandle(handle, ¶ms)) {
BOOST_LOG(error) << "Couldn't destroy session handle: "sv << func.nvFBCGetLastErrorStr(handle);
}
handle_flags[SESSION_HANDLE] = false;
return 0;
}
~handle_t() {
reset();
}
std::bitset<MAX_FLAGS> handle_flags;
NVFBC_SESSION_HANDLE handle;
};
// NvFBC-backed display capture: grabs frames straight into CUDA memory.
class display_t: public platf::display_t {
public:
// Create the NvFBC session, pick the monitor (or the whole virtual desktop)
// and prepare the capture session parameters. Returns 0 on success.
int
init(const std::string_view &display_name, const ::video::config_t &config) {
auto handle = handle_t::make();
if (!handle) {
return -1;
}
ctx_t ctx { handle->handle };
auto status_params = handle->status();
if (!status_params) {
return -1;
}
int streamedMonitor = -1;
if (!display_name.empty()) {
if (status_params->bXRandRAvailable) {
auto monitor_nr = util::from_view(display_name);
if (monitor_nr < 0 || monitor_nr >= status_params->dwOutputNum) {
BOOST_LOG(warning) << "Can't stream monitor ["sv << monitor_nr << "], it needs to be between [0] and ["sv << status_params->dwOutputNum - 1 << "], defaulting to virtual desktop"sv;
}
else {
streamedMonitor = monitor_nr;
}
}
else {
BOOST_LOG(warning) << "XrandR not available, streaming entire virtual desktop"sv;
}
}
// Frame pacing interval derived from the requested framerate
delay = std::chrono::nanoseconds { 1s } / config.framerate;
capture_params = NVFBC_CREATE_CAPTURE_SESSION_PARAMS { NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER };
capture_params.eCaptureType = NVFBC_CAPTURE_SHARED_CUDA;
capture_params.bDisableAutoModesetRecovery = nv_bool(true);
capture_params.dwSamplingRateMs = 1000 /* ms */ / config.framerate;
if (streamedMonitor != -1) {
// Track a single output
auto &output = status_params->outputs[streamedMonitor];
width = output.trackedBox.w;
height = output.trackedBox.h;
offset_x = output.trackedBox.x;
offset_y = output.trackedBox.y;
capture_params.eTrackingType = NVFBC_TRACKING_OUTPUT;
capture_params.dwOutputId = output.dwId;
}
else {
// Track the whole virtual desktop
capture_params.eTrackingType = NVFBC_TRACKING_SCREEN;
width = status_params->screenSize.w;
height = status_params->screenSize.h;
}
env_width = status_params->screenSize.w;
env_height = status_params->screenSize.h;
this->handle = std::move(*handle);
return 0;
}
// Paced capture loop: grabs a frame per tick and hands it to the consumer.
// Returns when the consumer declines an image or an error/reinit occurs.
platf::capture_e
capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override {
auto next_frame = std::chrono::steady_clock::now();
{
// We must create at least one texture on this thread before calling NvFBCToCudaSetUp()
// Otherwise it fails with "Unable to register an OpenGL buffer to a CUDA resource (result: 201)" message
std::shared_ptr<platf::img_t> img_dummy;
pull_free_image_cb(img_dummy);
}
// Force display_t::capture to initialize handle_t::capture
cursor_visible = !*cursor;
ctx_t ctx { handle.handle };
auto fg = util::fail_guard([&]() {
handle.reset();
});
sleep_overshoot_logger.reset();
while (true) {
auto now = std::chrono::steady_clock::now();
if (next_frame > now) {
std::this_thread::sleep_for(next_frame - now);
sleep_overshoot_logger.first_point(next_frame);
sleep_overshoot_logger.second_point_now_and_log();
}
next_frame += delay;
if (next_frame < now) { // some major slowdown happened; we couldn't keep up
next_frame = now + delay;
}
std::shared_ptr<platf::img_t> img_out;
auto status = snapshot(pull_free_image_cb, img_out, 150ms, *cursor);
switch (status) {
case platf::capture_e::reinit:
case platf::capture_e::error:
case platf::capture_e::interrupted:
return status;
case platf::capture_e::timeout:
if (!push_captured_image_cb(std::move(img_out), false)) {
return platf::capture_e::ok;
}
break;
case platf::capture_e::ok:
if (!push_captured_image_cb(std::move(img_out), true)) {
return platf::capture_e::ok;
}
break;
default:
BOOST_LOG(error) << "Unrecognized capture status ["sv << (int) status << ']';
return status;
}
}
// Unreachable: the loop above only exits via return
return platf::capture_e::ok;
}
// Reinitialize the capture session.
// Cursor-on uses the pull model with cursor blending; cursor-off tries the
// faster push-model direct capture and falls back if it doesn't engage.
platf::capture_e
reinit(bool cursor) {
if (handle.stop()) {
return platf::capture_e::error;
}
cursor_visible = cursor;
if (cursor) {
capture_params.bPushModel = nv_bool(false);
capture_params.bWithCursor = nv_bool(true);
capture_params.bAllowDirectCapture = nv_bool(false);
}
else {
capture_params.bPushModel = nv_bool(true);
capture_params.bWithCursor = nv_bool(false);
capture_params.bAllowDirectCapture = nv_bool(true);
}
if (handle.capture(capture_params)) {
return platf::capture_e::error;
}
// If trying to capture directly, test if it actually does.
if (capture_params.bAllowDirectCapture) {
CUdeviceptr device_ptr;
NVFBC_FRAME_GRAB_INFO info;
NVFBC_TOCUDA_GRAB_FRAME_PARAMS grab {
NVFBC_TOCUDA_GRAB_FRAME_PARAMS_VER,
NVFBC_TOCUDA_GRAB_FLAGS_NOWAIT,
&device_ptr,
&info,
0,
};
// Direct Capture may fail the first few times, even if it's possible
for (int x = 0; x < 3; ++x) {
if (auto status = func.nvFBCToCudaGrabFrame(handle.handle, &grab)) {
if (status == NVFBC_ERR_MUST_RECREATE) {
return platf::capture_e::reinit;
}
BOOST_LOG(error) << "Couldn't capture nvFramebuffer: "sv << handle.last_error();
return platf::capture_e::error;
}
if (info.bDirectCapture) {
break;
}
BOOST_LOG(debug) << "Direct capture failed attempt ["sv << x << ']';
}
if (!info.bDirectCapture) {
BOOST_LOG(debug) << "Direct capture failed, trying the extra copy method"sv;
// Direct capture failed
capture_params.bPushModel = nv_bool(false);
capture_params.bWithCursor = nv_bool(false);
capture_params.bAllowDirectCapture = nv_bool(false);
if (handle.stop() || handle.capture(capture_params)) {
return platf::capture_e::error;
}
}
}
return platf::capture_e::ok;
}
// Grab one frame into a free image buffer. Restarts the session when the
// cursor visibility requirement changed since the last grab.
platf::capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor) {
if (cursor != cursor_visible) {
auto status = reinit(cursor);
if (status != platf::capture_e::ok) {
return status;
}
}
CUdeviceptr device_ptr;
NVFBC_FRAME_GRAB_INFO info;
NVFBC_TOCUDA_GRAB_FRAME_PARAMS grab {
NVFBC_TOCUDA_GRAB_FRAME_PARAMS_VER,
NVFBC_TOCUDA_GRAB_FLAGS_NOWAIT,
&device_ptr,
&info,
(std::uint32_t) timeout.count(),
};
if (auto status = func.nvFBCToCudaGrabFrame(handle.handle, &grab)) {
if (status == NVFBC_ERR_MUST_RECREATE) {
return platf::capture_e::reinit;
}
BOOST_LOG(error) << "Couldn't capture nvFramebuffer: "sv << handle.last_error();
return platf::capture_e::error;
}
if (!pull_free_image_cb(img_out)) {
return platf::capture_e::interrupted;
}
auto img = (img_t *) img_out.get();
// Copy the grabbed BGRA frame from CUDA device memory into the image texture
if (img->tex.copy((std::uint8_t *) device_ptr, img->height, img->row_pitch)) {
return platf::capture_e::error;
}
return platf::capture_e::ok;
}
// NOTE(review): unlike alloc_img/dummy_img this is not marked `override`;
// confirm against platf::display_t whether it should be.
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(platf::pix_fmt_e pix_fmt) {
return ::cuda::make_avcodec_encode_device(width, height, true);
}
// Allocate a CUDA-texture-backed image sized for this display.
std::shared_ptr<platf::img_t>
alloc_img() override {
auto img = std::make_shared<cuda::img_t>();
img->data = nullptr;
img->width = width;
img->height = height;
img->pixel_pitch = 4;
img->row_pitch = img->width * img->pixel_pitch;
auto tex_opt = tex_t::make(height, width * img->pixel_pitch);
if (!tex_opt) {
return nullptr;
}
img->tex = std::move(*tex_opt);
return img;
};
int
dummy_img(platf::img_t *) override {
return 0;
}
// Target interval between frames
std::chrono::nanoseconds delay;
// Cursor mode the current capture session was created with
bool cursor_visible;
handle_t handle;
NVFBC_CREATE_CAPTURE_SESSION_PARAMS capture_params;
};
} // namespace nvfbc
} // namespace cuda
namespace platf {
/**
 * @brief Create an NvFBC display for the given monitor.
 * @param hwdevice_type Memory type of the encoding device; must be CUDA.
 * @param display_name Monitor to capture.
 * @param config Stream configuration (framerate etc.).
 * @return The display, or nullptr when the device type is wrong or init fails.
 */
std::shared_ptr<display_t>
nvfbc_display(mem_type_e hwdevice_type, const std::string &display_name, const video::config_t &config) {
  // NvFBC capture always feeds CUDA memory; reject anything else up front
  if (hwdevice_type != mem_type_e::cuda) {
    BOOST_LOG(error) << "Could not initialize nvfbc display with the given hw device type"sv;
    return nullptr;
  }

  auto disp = std::make_shared<cuda::nvfbc::display_t>();
  if (disp->init(display_name, config) != 0) {
    return nullptr;
  }

  return disp;
}
/**
 * @brief Enumerate the outputs NvFBC can capture.
 * @return One display name (the output index as a string) per output;
 *         empty when CUDA/NvFBC are unavailable or status can't be queried.
 */
std::vector<std::string>
nvfbc_display_names() {
  if (cuda::init() || cuda::nvfbc::init()) {
    return {};
  }

  std::vector<std::string> display_names;

  auto handle = cuda::nvfbc::handle_t::make();
  if (!handle) {
    return {};
  }

  auto status_params = handle->status();
  if (!status_params) {
    return {};
  }

  if (!status_params->bIsCapturePossible) {
    BOOST_LOG(error) << "NVidia driver doesn't support NvFBC screencasting"sv;
  }

  BOOST_LOG(info) << "Found ["sv << status_params->dwOutputNum << "] outputs"sv;
  BOOST_LOG(info) << "Virtual Desktop: "sv << status_params->screenSize.w << 'x' << status_params->screenSize.h;
  BOOST_LOG(info) << "XrandR: "sv << (status_params->bXRandRAvailable ? "available"sv : "unavailable"sv);

  // Unsigned index: dwOutputNum is a 32-bit unsigned count, so a signed
  // loop variable would trigger a signed/unsigned comparison
  for (unsigned int x = 0; x < status_params->dwOutputNum; ++x) {
    auto &output = status_params->outputs[x];
    BOOST_LOG(info) << "-- Output --"sv;
    BOOST_LOG(debug) << "  ID: "sv << output.dwId;
    BOOST_LOG(debug) << "  Name: "sv << output.name;
    BOOST_LOG(info) << "  Resolution: "sv << output.trackedBox.w << 'x' << output.trackedBox.h;
    BOOST_LOG(info) << "  Offset: "sv << output.trackedBox.x << 'x' << output.trackedBox.y;
    display_names.emplace_back(std::to_string(x));
  }

  return display_names;
}
} // namespace platf
| 30,804
|
C++
|
.cpp
| 826
| 29.583535
| 194
| 0.601862
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,000
|
graphics.cpp
|
LizardByte_Sunshine/src/platform/linux/graphics.cpp
|
/**
* @file src/platform/linux/graphics.cpp
* @brief Definitions for graphics related functions.
*/
#include "graphics.h"
#include "src/file_handler.h"
#include "src/logging.h"
#include "src/video.h"
#include <fcntl.h>
extern "C" {
#include <libavutil/pixdesc.h>
}
// I want to have as little build dependencies as possible
// There aren't that many DRM_FORMAT I need to use, so define them here
//
// They aren't likely to change any time soon.
#define fourcc_code(a, b, c, d) ((std::uint32_t)(a) | ((std::uint32_t)(b) << 8) | \
((std::uint32_t)(c) << 16) | ((std::uint32_t)(d) << 24))
#define fourcc_mod_code(vendor, val) ((((uint64_t) vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(0, ((1ULL << 56) - 1))
#if !defined(SUNSHINE_SHADERS_DIR) // for testing this needs to be defined in cmake as we don't do an install
#define SUNSHINE_SHADERS_DIR SUNSHINE_ASSETS_DIR "/shaders/opengl"
#endif
using namespace std::literals;
namespace gl {
GladGLContext ctx;
void
drain_errors(const std::string_view &prefix) {
  // Pop every pending error off the GL error queue and log it, so stale
  // errors are not attributed to later GL calls.
  for (GLenum err = ctx.GetError(); err != GL_NO_ERROR; err = ctx.GetError()) {
    BOOST_LOG(error) << "GL: "sv << prefix << ": ["sv << util::hex(err).to_string_view() << ']';
  }
}
tex_t::~tex_t() {
  // A default-constructed or moved-from instance owns no texture names.
  auto count = size();
  if (count == 0) {
    return;
  }
  ctx.DeleteTextures(count, begin());
}
tex_t
tex_t::make(std::size_t count) {
  // Generate `count` texture names, each configured identically:
  // clamp-to-edge wrapping, linear filtering, opaque-black border color.
  tex_t textures { count };
  ctx.GenTextures(textures.size(), textures.begin());

  const float border_color[] = { 0.0f, 0.0f, 0.0f, 1.0f };
  for (auto id : textures) {
    gl::ctx.BindTexture(GL_TEXTURE_2D, id);

    gl::ctx.TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);  // x
    gl::ctx.TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);  // y
    gl::ctx.TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    gl::ctx.TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    gl::ctx.TexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, border_color);
  }

  return textures;
}
frame_buf_t::~frame_buf_t() {
  // begin() is null for a default-constructed or moved-from instance.
  if (!begin()) {
    return;
  }
  ctx.DeleteFramebuffers(size(), begin());
}
frame_buf_t
frame_buf_t::make(std::size_t count) {
  // Allocate `count` framebuffer-object names in a single GL call.
  frame_buf_t buffers { count };
  ctx.GenFramebuffers(buffers.size(), buffers.begin());
  return buffers;
}
void
frame_buf_t::copy(int id, int texture, int offset_x, int offset_y, int width, int height) {
  // Copy a sub-rectangle from this framebuffer's color attachment `id`
  // into `texture`, which must already have storage >= width x height.
  gl::ctx.BindFramebuffer(GL_FRAMEBUFFER, (*this)[id]);
  gl::ctx.ReadBuffer(GL_COLOR_ATTACHMENT0 + id);
  gl::ctx.BindTexture(GL_TEXTURE_2D, texture);
  gl::ctx.CopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, offset_x, offset_y, width, height);
}
std::string
shader_t::err_str() {
  // Retrieve the shader's info log (populated by a failed compile).
  int length = 0;
  ctx.GetShaderiv(handle(), GL_INFO_LOG_LENGTH, &length);
  if (length <= 0) {
    // No log available; the unguarded resize(length - 1) below would
    // otherwise underflow to SIZE_MAX and throw.
    return {};
  }
  std::string string;
  string.resize(length);
  ctx.GetShaderInfoLog(handle(), length, &length, string.data());
  // `length` now excludes the NUL terminator; also drop the trailing
  // newline most drivers append (preserves the original trimming).
  string.resize(length > 0 ? length - 1 : 0);
  return string;
}
util::Either<shader_t, std::string>
shader_t::compile(const std::string_view &source, GLenum type) {
  // Compile `source` as a shader of the given `type` (e.g. GL_VERTEX_SHADER,
  // GL_FRAGMENT_SHADER). Returns the shader on success, otherwise the
  // driver's compile log as a string.
  shader_t shader;
  auto data = source.data();
  GLint length = source.length();
  shader._shader.el = ctx.CreateShader(type);
  // Pass an explicit length so `source` need not be NUL-terminated.
  ctx.ShaderSource(shader.handle(), 1, &data, &length);
  ctx.CompileShader(shader.handle());
  int status = 0;
  ctx.GetShaderiv(shader.handle(), GL_COMPILE_STATUS, &status);
  if (!status) {
    // Compile failed; the partially-built shader is released by shader_t's dtor.
    return shader.err_str();
  }
  return shader;
}
GLuint
shader_t::handle() const {
  // Raw GL shader name, for direct GL calls.
  return _shader.el;
}
buffer_t
buffer_t::make(util::buffer_t<GLint> &&offsets, const char *block, const std::string_view &data) {
  // Create a uniform buffer object pre-filled with `data`, remembering the
  // uniform-block name and per-member offsets for later partial updates.
  buffer_t buf;

  buf._block = block;
  buf._size = data.size();
  buf._offsets = std::move(offsets);

  ctx.GenBuffers(1, &buf._buffer.el);
  ctx.BindBuffer(GL_UNIFORM_BUFFER, buf.handle());
  ctx.BufferData(GL_UNIFORM_BUFFER, data.size(), (const std::uint8_t *) data.data(), GL_DYNAMIC_DRAW);

  return buf;
}
GLuint
buffer_t::handle() const {
  // Raw GL buffer name, for direct GL calls.
  return _buffer.el;
}
const char *
buffer_t::block() const {
  // Name of the uniform block this buffer backs (as passed to make()).
  return _block;
}
void
buffer_t::update(const std::string_view &view, std::size_t offset) {
  // Upload raw bytes into the UBO starting at byte `offset`.
  ctx.BindBuffer(GL_UNIFORM_BUFFER, handle());
  ctx.BufferSubData(GL_UNIFORM_BUFFER, offset, view.size(), (const void *) view.data());
}
void
buffer_t::update(std::string_view *members, std::size_t count, std::size_t offset) {
util::buffer_t<std::uint8_t> buffer { _size };
for (int x = 0; x < count; ++x) {
auto val = members[x];
std::copy_n((const std::uint8_t *) val.data(), val.size(), &buffer[_offsets[x]]);
}
update(util::view(buffer.begin(), buffer.end()), offset);
}
std::string
program_t::err_str() {
  // Retrieve the program's info log (populated by a failed link).
  int length = 0;
  ctx.GetProgramiv(handle(), GL_INFO_LOG_LENGTH, &length);
  if (length <= 0) {
    // No log; guard against resize(length - 1) underflowing below.
    return {};
  }
  std::string string;
  string.resize(length);
  // BUG FIX: handle() is a *program* object, so the log must be fetched with
  // GetProgramInfoLog. The original called GetShaderInfoLog, which raises
  // GL_INVALID_OPERATION for a program name and leaves the buffer untouched.
  ctx.GetProgramInfoLog(handle(), length, &length, string.data());
  // `length` excludes the NUL; also trim the trailing newline (matches
  // shader_t::err_str()).
  string.resize(length > 0 ? length - 1 : 0);
  return string;
}
util::Either<program_t, std::string>
program_t::link(const shader_t &vert, const shader_t &frag) {
  // Link a vertex + fragment shader pair into a new program.
  // Returns the program on success, otherwise the driver's link log.
  program_t program;
  program._program.el = ctx.CreateProgram();
  ctx.AttachShader(program.handle(), vert.handle());
  ctx.AttachShader(program.handle(), frag.handle());
  // p_handle stores a copy of the program handle, since program will be moved before
  // the fail guard function is called.
  auto fg = util::fail_guard([p_handle = program.handle(), &vert, &frag]() {
    // Shaders are always detached on scope exit so they can be deleted
    // independently of the program.
    ctx.DetachShader(p_handle, vert.handle());
    ctx.DetachShader(p_handle, frag.handle());
  });
  ctx.LinkProgram(program.handle());
  int status = 0;
  ctx.GetProgramiv(program.handle(), GL_LINK_STATUS, &status);
  if (!status) {
    // Link failed; the partially-built program is released by program_t's dtor.
    return program.err_str();
  }
  return program;
}
void
program_t::bind(const buffer_t &buffer) {
  // Bind `buffer` to this program's uniform block of the same name.
  ctx.UseProgram(handle());

  auto block_index = ctx.GetUniformBlockIndex(handle(), buffer.block());
  ctx.BindBufferBase(GL_UNIFORM_BUFFER, block_index, buffer.handle());
}
std::optional<buffer_t>
program_t::uniform(const char *block, std::pair<const char *, std::string_view> *members, std::size_t count) {
  // Create a UBO for the named uniform block, initialized from `members`
  // (pairs of member name -> raw bytes). Member offsets are queried from the
  // linked program so the data matches the driver's block layout exactly.
  // Returns std::nullopt when the block or any member cannot be resolved.
  auto i = ctx.GetUniformBlockIndex(handle(), block);
  if (i == GL_INVALID_INDEX) {
    BOOST_LOG(error) << "Couldn't find index of ["sv << block << ']';
    return std::nullopt;
  }
  int size;
  ctx.GetActiveUniformBlockiv(handle(), i, GL_UNIFORM_BLOCK_DATA_SIZE, &size);
  bool error_flag = false;
  util::buffer_t<GLint> offsets { count };
  // Stack scratch space; `count` is a handful of members at most.
  auto indices = (std::uint32_t *) alloca(count * sizeof(std::uint32_t));
  auto names = (const char **) alloca(count * sizeof(const char *));
  auto names_p = names;
  std::for_each_n(members, count, [names_p](auto &member) mutable {
    *names_p++ = std::get<0>(member);
  });
  std::fill_n(indices, count, GL_INVALID_INDEX);
  ctx.GetUniformIndices(handle(), count, names, indices);
  // Report every unresolved member before bailing out, for better diagnostics.
  for (int x = 0; x < count; ++x) {
    if (indices[x] == GL_INVALID_INDEX) {
      error_flag = true;
      BOOST_LOG(error) << "Couldn't find ["sv << block << '.' << members[x].first << ']';
    }
  }
  if (error_flag) {
    return std::nullopt;
  }
  ctx.GetActiveUniformsiv(handle(), count, indices, GL_UNIFORM_OFFSET, offsets.begin());
  // Stage each member's initial value at the queried offset, then create the UBO.
  util::buffer_t<std::uint8_t> buffer { (std::size_t) size };
  for (int x = 0; x < count; ++x) {
    auto val = std::get<1>(members[x]);
    std::copy_n((const std::uint8_t *) val.data(), val.size(), &buffer[offsets[x]]);
  }
  return buffer_t::make(std::move(offsets), block, std::string_view { (char *) buffer.begin(), buffer.size() });
}
GLuint
program_t::handle() const {
  // Raw GL program name, for direct GL calls.
  return _program.el;
}
} // namespace gl
namespace gbm {
// Function pointers resolved from libgbm at runtime by gbm::init().
device_destroy_fn device_destroy;
create_device_fn create_device;
int
init() {
  // Lazily dlopen() libgbm and resolve the two entry points used here.
  // Idempotent: once loading succeeds, subsequent calls return immediately.
  // Returns 0 on success, -1 when the library or a symbol is missing.
  static void *handle { nullptr };
  static bool funcs_loaded = false;
  if (funcs_loaded) return 0;
  if (!handle) {
    handle = dyn::handle({ "libgbm.so.1", "libgbm.so" });
    if (!handle) {
      return -1;
    }
  }
  std::vector<std::tuple<GLADapiproc *, const char *>> funcs {
    { (GLADapiproc *) &device_destroy, "gbm_device_destroy" },
    { (GLADapiproc *) &create_device, "gbm_create_device" },
  };
  if (dyn::load(handle, funcs)) {
    return -1;
  }
  funcs_loaded = true;
  return 0;
}
} // namespace gbm
namespace egl {
// Attribute tokens from EGL_EXT_image_dma_buf_import(_modifiers), defined
// locally so building does not require recent EGL headers.
constexpr auto EGL_LINUX_DMA_BUF_EXT = 0x3270;
constexpr auto EGL_LINUX_DRM_FOURCC_EXT = 0x3271;
constexpr auto EGL_DMA_BUF_PLANE0_FD_EXT = 0x3272;
constexpr auto EGL_DMA_BUF_PLANE0_OFFSET_EXT = 0x3273;
constexpr auto EGL_DMA_BUF_PLANE0_PITCH_EXT = 0x3274;
constexpr auto EGL_DMA_BUF_PLANE1_FD_EXT = 0x3275;
constexpr auto EGL_DMA_BUF_PLANE1_OFFSET_EXT = 0x3276;
constexpr auto EGL_DMA_BUF_PLANE1_PITCH_EXT = 0x3277;
constexpr auto EGL_DMA_BUF_PLANE2_FD_EXT = 0x3278;
constexpr auto EGL_DMA_BUF_PLANE2_OFFSET_EXT = 0x3279;
constexpr auto EGL_DMA_BUF_PLANE2_PITCH_EXT = 0x327A;
constexpr auto EGL_DMA_BUF_PLANE3_FD_EXT = 0x3440;
constexpr auto EGL_DMA_BUF_PLANE3_OFFSET_EXT = 0x3441;
constexpr auto EGL_DMA_BUF_PLANE3_PITCH_EXT = 0x3442;
constexpr auto EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT = 0x3443;
constexpr auto EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT = 0x3444;
constexpr auto EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT = 0x3445;
constexpr auto EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT = 0x3446;
constexpr auto EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT = 0x3447;
constexpr auto EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT = 0x3448;
constexpr auto EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT = 0x3449;
constexpr auto EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT = 0x344A;
bool
fail() {
  // True when the most recent EGL call recorded an error.
  // NOTE: eglGetError() also clears the error flag as a side effect.
  return eglGetError() != EGL_SUCCESS;
}
/**
* @memberof egl::display_t
*/
display_t
make_display(std::variant<gbm::gbm_t::pointer, wl_display *, _XDisplay *> native_display) {
  // Open and initialize an EGL display for the given native display handle,
  // verifying that the extensions required for dma-buf import are present.
  // Returns nullptr on any failure.
  // Platform enums (EGL_MESA_platform_gbm / EGL_KHR_platform_*).
  constexpr auto EGL_PLATFORM_GBM_MESA = 0x31D7;
  constexpr auto EGL_PLATFORM_WAYLAND_KHR = 0x31D8;
  constexpr auto EGL_PLATFORM_X11_KHR = 0x31D5;
  int egl_platform;
  void *native_display_p;
  // Select the EGL platform matching the variant alternative.
  switch (native_display.index()) {
    case 0:
      egl_platform = EGL_PLATFORM_GBM_MESA;
      native_display_p = std::get<0>(native_display);
      break;
    case 1:
      egl_platform = EGL_PLATFORM_WAYLAND_KHR;
      native_display_p = std::get<1>(native_display);
      break;
    case 2:
      egl_platform = EGL_PLATFORM_X11_KHR;
      native_display_p = std::get<2>(native_display);
      break;
    default:
      BOOST_LOG(error) << "egl::make_display(): Index ["sv << native_display.index() << "] not implemented"sv;
      return nullptr;
  }
  // All variant alternatives are plain pointers, so the same void* is passed
  // regardless of which platform was selected.
  display_t display = eglGetPlatformDisplay(egl_platform, native_display_p, nullptr);
  if (fail()) {
    BOOST_LOG(error) << "Couldn't open EGL display: ["sv << util::hex(eglGetError()).to_string_view() << ']';
    return nullptr;
  }
  int major, minor;
  if (!eglInitialize(display.get(), &major, &minor)) {
    BOOST_LOG(error) << "Couldn't initialize EGL display: ["sv << util::hex(eglGetError()).to_string_view() << ']';
    return nullptr;
  }
  // Query strings are only valid after eglInitialize().
  const char *extension_st = eglQueryString(display.get(), EGL_EXTENSIONS);
  const char *version = eglQueryString(display.get(), EGL_VERSION);
  const char *vendor = eglQueryString(display.get(), EGL_VENDOR);
  const char *apis = eglQueryString(display.get(), EGL_CLIENT_APIS);
  BOOST_LOG(debug) << "EGL: ["sv << vendor << "]: version ["sv << version << ']';
  BOOST_LOG(debug) << "API's supported: ["sv << apis << ']';
  // All of these are mandatory for surfaceless contexts + dma-buf import.
  const char *extensions[] {
    "EGL_KHR_create_context",
    "EGL_KHR_surfaceless_context",
    "EGL_EXT_image_dma_buf_import",
    "EGL_EXT_image_dma_buf_import_modifiers",
  };
  for (auto ext : extensions) {
    if (!std::strstr(extension_st, ext)) {
      BOOST_LOG(error) << "Missing extension: ["sv << ext << ']';
      return nullptr;
    }
  }
  return display;
}
std::optional<ctx_t>
make_ctx(display_t::pointer display) {
  // Create a surfaceless OpenGL context on `display`, make it current, and
  // load the GL function pointers into the global gl::ctx table.
  constexpr int conf_attr[] {
    EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, EGL_NONE
  };
  int count;
  EGLConfig conf;
  if (!eglChooseConfig(display, conf_attr, &conf, 1, &count)) {
    BOOST_LOG(error) << "Couldn't set config attributes: ["sv << util::hex(eglGetError()).to_string_view() << ']';
    return std::nullopt;
  }
  if (!eglBindAPI(EGL_OPENGL_API)) {
    BOOST_LOG(error) << "Couldn't bind API: ["sv << util::hex(eglGetError()).to_string_view() << ']';
    return std::nullopt;
  }
  constexpr int attr[] {
    EGL_CONTEXT_CLIENT_VERSION, 3, EGL_NONE
  };
  ctx_t ctx { display, eglCreateContext(display, conf, EGL_NO_CONTEXT, attr) };
  if (fail()) {
    BOOST_LOG(error) << "Couldn't create EGL context: ["sv << util::hex(eglGetError()).to_string_view() << ']';
    return std::nullopt;
  }
  TUPLE_EL_REF(ctx_p, 1, ctx.el);
  // Surfaceless: no draw/read surface is required (EGL_KHR_surfaceless_context).
  if (!eglMakeCurrent(display, EGL_NO_SURFACE, EGL_NO_SURFACE, ctx_p)) {
    BOOST_LOG(error) << "Couldn't make current display"sv;
    return std::nullopt;
  }
  // GL functions can only be resolved once a context is current.
  if (!gladLoadGLContext(&gl::ctx, eglGetProcAddress)) {
    BOOST_LOG(error) << "Couldn't load OpenGL library"sv;
    return std::nullopt;
  }
  BOOST_LOG(debug) << "GL: vendor: "sv << gl::ctx.GetString(GL_VENDOR);
  BOOST_LOG(debug) << "GL: renderer: "sv << gl::ctx.GetString(GL_RENDERER);
  BOOST_LOG(debug) << "GL: version: "sv << gl::ctx.GetString(GL_VERSION);
  BOOST_LOG(debug) << "GL: shader: "sv << gl::ctx.GetString(GL_SHADING_LANGUAGE_VERSION);
  // Byte-aligned uploads: image rows are not padded to 4 bytes.
  gl::ctx.PixelStorei(GL_UNPACK_ALIGNMENT, 1);
  return ctx;
}
// EGL attribute tokens describing one dma-buf plane; see get_plane().
struct plane_attr_t {
  EGLAttrib fd;  // EGL_DMA_BUF_PLANEn_FD_EXT
  EGLAttrib offset;  // EGL_DMA_BUF_PLANEn_OFFSET_EXT
  EGLAttrib pitch;  // EGL_DMA_BUF_PLANEn_PITCH_EXT
  EGLAttrib lo;  // EGL_DMA_BUF_PLANEn_MODIFIER_LO_EXT
  EGLAttrib hi;  // EGL_DMA_BUF_PLANEn_MODIFIER_HI_EXT
};
inline plane_attr_t
get_plane(std::uint32_t plane_indice) {
  // Per-plane EGL attribute tokens, indexed by plane number (0..3).
  // An out-of-range index yields a zeroed struct, matching the old
  // switch-default behavior.
  static const plane_attr_t planes[] {
    {
      EGL_DMA_BUF_PLANE0_FD_EXT,
      EGL_DMA_BUF_PLANE0_OFFSET_EXT,
      EGL_DMA_BUF_PLANE0_PITCH_EXT,
      EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT,
      EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT,
    },
    {
      EGL_DMA_BUF_PLANE1_FD_EXT,
      EGL_DMA_BUF_PLANE1_OFFSET_EXT,
      EGL_DMA_BUF_PLANE1_PITCH_EXT,
      EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT,
      EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT,
    },
    {
      EGL_DMA_BUF_PLANE2_FD_EXT,
      EGL_DMA_BUF_PLANE2_OFFSET_EXT,
      EGL_DMA_BUF_PLANE2_PITCH_EXT,
      EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT,
      EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT,
    },
    {
      EGL_DMA_BUF_PLANE3_FD_EXT,
      EGL_DMA_BUF_PLANE3_OFFSET_EXT,
      EGL_DMA_BUF_PLANE3_PITCH_EXT,
      EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT,
      EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT,
    },
  };

  if (plane_indice < sizeof(planes) / sizeof(planes[0])) {
    return planes[plane_indice];
  }

  return {};
}
/**
* @brief Get EGL attributes for eglCreateImage() to import the provided surface.
* @param surface The surface descriptor.
* @return Vector of EGL attributes.
*/
std::vector<EGLAttrib>
surface_descriptor_to_egl_attribs(const surface_descriptor_t &surface) {
  // Build the attribute list for eglCreateImage(EGL_LINUX_DMA_BUF_EXT):
  // width/height/fourcc plus fd/offset/pitch (and modifier when valid) for
  // every plane that has a file descriptor.
  std::vector<EGLAttrib> attribs;
  attribs.emplace_back(EGL_WIDTH);
  attribs.emplace_back(surface.width);
  attribs.emplace_back(EGL_HEIGHT);
  attribs.emplace_back(surface.height);
  attribs.emplace_back(EGL_LINUX_DRM_FOURCC_EXT);
  attribs.emplace_back(surface.fourcc);
  for (auto x = 0; x < 4; ++x) {
    auto fd = surface.fds[x];
    // Unused planes are marked with a negative fd.
    if (fd < 0) {
      continue;
    }
    auto plane_attr = get_plane(x);
    attribs.emplace_back(plane_attr.fd);
    attribs.emplace_back(fd);
    attribs.emplace_back(plane_attr.offset);
    attribs.emplace_back(surface.offsets[x]);
    attribs.emplace_back(plane_attr.pitch);
    attribs.emplace_back(surface.pitches[x]);
    // The 64-bit DRM format modifier is split into lo/hi 32-bit attributes.
    if (surface.modifier != DRM_FORMAT_MOD_INVALID) {
      attribs.emplace_back(plane_attr.lo);
      attribs.emplace_back(surface.modifier & 0xFFFFFFFF);
      attribs.emplace_back(plane_attr.hi);
      attribs.emplace_back(surface.modifier >> 32);
    }
  }
  // Attribute lists are EGL_NONE-terminated.
  attribs.emplace_back(EGL_NONE);
  return attribs;
}
std::optional<rgb_t>
import_source(display_t::pointer egl_display, const surface_descriptor_t &xrgb) {
  // Import a dma-buf RGB surface as an EGLImage and bind it to a new texture.
  // Returns std::nullopt when eglCreateImage() fails.
  auto attribs = surface_descriptor_to_egl_attribs(xrgb);
  rgb_t rgb {
    egl_display,
    eglCreateImage(egl_display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, nullptr, attribs.data()),
    gl::tex_t::make(1)
  };
  if (!rgb->xrgb8) {
    BOOST_LOG(error) << "Couldn't import RGB Image: "sv << util::hex(eglGetError()).to_string_view();
    return std::nullopt;
  }
  // Attach the EGLImage to the texture while it is bound; unbind afterwards
  // so later GL calls don't accidentally touch it.
  gl::ctx.BindTexture(GL_TEXTURE_2D, rgb->tex[0]);
  gl::ctx.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, rgb->xrgb8);
  gl::ctx.BindTexture(GL_TEXTURE_2D, 0);
  gl_drain_errors;
  return rgb;
}
/**
* @brief Create a black RGB texture of the specified image size.
* @param img The image to use for texture sizing.
* @return The new RGB texture.
*/
rgb_t
create_blank(platf::img_t &img) {
  // Allocate an RGBA8 texture the size of `img` and clear it to transparent
  // black via a temporary framebuffer. No EGLImage backs this texture.
  rgb_t rgb {
    EGL_NO_DISPLAY,
    EGL_NO_IMAGE,
    gl::tex_t::make(1)
  };
  gl::ctx.BindTexture(GL_TEXTURE_2D, rgb->tex[0]);
  gl::ctx.TexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8, img.width, img.height);
  gl::ctx.BindTexture(GL_TEXTURE_2D, 0);
  // Clearing requires the texture as a framebuffer color attachment.
  auto framebuf = gl::frame_buf_t::make(1);
  framebuf.bind(&rgb->tex[0], &rgb->tex[0] + 1);
  GLenum attachment = GL_COLOR_ATTACHMENT0;
  gl::ctx.DrawBuffers(1, &attachment);
  const GLuint rgb_black[] = { 0, 0, 0, 0 };
  gl::ctx.ClearBufferuiv(GL_COLOR, 0, rgb_black);
  gl_drain_errors;
  return rgb;
}
std::optional<nv12_t>
import_target(display_t::pointer egl_display, std::array<file_t, nv12_img_t::num_fds> &&fds, const surface_descriptor_t &y, const surface_descriptor_t &uv) {
  // Import the Y and UV planes of an NV12 dma-buf surface as two EGLImages,
  // attach them to textures/framebuffers, and clear both planes to black.
  // Takes ownership of `fds`. Returns std::nullopt on import failure.
  auto y_attribs = surface_descriptor_to_egl_attribs(y);
  auto uv_attribs = surface_descriptor_to_egl_attribs(uv);
  nv12_t nv12 {
    egl_display,
    eglCreateImage(egl_display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, nullptr, y_attribs.data()),
    eglCreateImage(egl_display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, nullptr, uv_attribs.data()),
    gl::tex_t::make(2),
    gl::frame_buf_t::make(2),
    std::move(fds)
  };
  if (!nv12->r8 || !nv12->bg88) {
    BOOST_LOG(error) << "Couldn't import YUV target: "sv << util::hex(eglGetError()).to_string_view();
    return std::nullopt;
  }
  // tex[0] <- Y plane (R8), tex[1] <- UV plane (RG88).
  gl::ctx.BindTexture(GL_TEXTURE_2D, nv12->tex[0]);
  gl::ctx.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, nv12->r8);
  gl::ctx.BindTexture(GL_TEXTURE_2D, nv12->tex[1]);
  gl::ctx.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, nv12->bg88);
  nv12->buf.bind(std::begin(nv12->tex), std::end(nv12->tex));
  GLenum attachments[] {
    GL_COLOR_ATTACHMENT0,
    GL_COLOR_ATTACHMENT1
  };
  // Clear: Y to 0.0 (black), UV to 0.5 (neutral chroma).
  for (int x = 0; x < sizeof(attachments) / sizeof(decltype(attachments[0])); ++x) {
    gl::ctx.BindFramebuffer(GL_FRAMEBUFFER, nv12->buf[x]);
    gl::ctx.DrawBuffers(1, &attachments[x]);
    const float y_black[] = { 0.0f, 0.0f, 0.0f, 0.0f };
    const float uv_black[] = { 0.5f, 0.5f, 0.5f, 0.5f };
    gl::ctx.ClearBufferfv(GL_COLOR, 0, x == 0 ? y_black : uv_black);
  }
  gl::ctx.BindFramebuffer(GL_FRAMEBUFFER, 0);
  gl_drain_errors;
  return nv12;
}
/**
* @brief Create biplanar YUV textures to render into.
* @param width Width of the target frame.
* @param height Height of the target frame.
* @param format Format of the target frame.
* @return The new RGB texture.
*/
std::optional<nv12_t>
create_target(int width, int height, AVPixelFormat format) {
  // Allocate GL-only (no EGLImage) biplanar YUV textures sized for `format`:
  // an R plane at full resolution and an RG plane at chroma resolution.
  // Both planes are cleared to black / neutral chroma.
  nv12_t nv12 {
    EGL_NO_DISPLAY,
    EGL_NO_IMAGE,
    EGL_NO_IMAGE,
    gl::tex_t::make(2),
    gl::frame_buf_t::make(2),
  };
  GLint y_format;
  GLint uv_format;
  // Determine the size of each plane element
  auto fmt_desc = av_pix_fmt_desc_get(format);
  if (fmt_desc->comp[0].depth <= 8) {
    y_format = GL_R8;
    uv_format = GL_RG8;
  }
  else if (fmt_desc->comp[0].depth <= 16) {
    y_format = GL_R16;
    uv_format = GL_RG16;
  }
  else {
    BOOST_LOG(error) << "Unsupported target pixel format: "sv << format;
    return std::nullopt;
  }
  gl::ctx.BindTexture(GL_TEXTURE_2D, nv12->tex[0]);
  gl::ctx.TexStorage2D(GL_TEXTURE_2D, 1, y_format, width, height);
  gl::ctx.BindTexture(GL_TEXTURE_2D, nv12->tex[1]);
  // Chroma plane is subsampled per the pixel format descriptor.
  gl::ctx.TexStorage2D(GL_TEXTURE_2D, 1, uv_format,
    width >> fmt_desc->log2_chroma_w, height >> fmt_desc->log2_chroma_h);
  nv12->buf.bind(std::begin(nv12->tex), std::end(nv12->tex));
  GLenum attachments[] {
    GL_COLOR_ATTACHMENT0,
    GL_COLOR_ATTACHMENT1
  };
  // Clear: Y to 0.0 (black), UV to 0.5 (neutral chroma).
  for (int x = 0; x < sizeof(attachments) / sizeof(decltype(attachments[0])); ++x) {
    gl::ctx.BindFramebuffer(GL_FRAMEBUFFER, nv12->buf[x]);
    gl::ctx.DrawBuffers(1, &attachments[x]);
    const float y_black[] = { 0.0f, 0.0f, 0.0f, 0.0f };
    const float uv_black[] = { 0.5f, 0.5f, 0.5f, 0.5f };
    gl::ctx.ClearBufferfv(GL_COLOR, 0, x == 0 ? y_black : uv_black);
  }
  gl::ctx.BindFramebuffer(GL_FRAMEBUFFER, 0);
  gl_drain_errors;
  return nv12;
}
void
sws_t::apply_colorspace(const video::sunshine_colorspace_t &colorspace) {
  // Upload the conversion vectors for `colorspace` into the ColorMatrix UBO
  // and rebind it to the Y and UV conversion programs.
  auto color_p = video::color_vectors_from_colorspace(colorspace);
  // Member order must match the offsets recorded when the UBO was created
  // in sws_t::make().
  std::string_view members[] {
    util::view(color_p->color_vec_y),
    util::view(color_p->color_vec_u),
    util::view(color_p->color_vec_v),
    util::view(color_p->range_y),
    util::view(color_p->range_uv),
  };
  color_matrix.update(members, sizeof(members) / sizeof(decltype(members[0])));
  program[0].bind(color_matrix);
  program[1].bind(color_matrix);
}
std::optional<sws_t>
sws_t::make(int in_width, int in_height, int out_width, int out_height, gl::tex_t &&tex) {
  // Build a software-scaler instance: compile/link the Y, UV and cursor
  // shader programs, create the ColorMatrix UBO (Rec.601 by default), and
  // compute an aspect-ratio-preserving letterboxed output viewport.
  // Returns std::nullopt on any shader/uniform failure.
  sws_t sws;
  // Sentinel: force the first cursor upload in load_vram().
  sws.serial = std::numeric_limits<std::uint64_t>::max();
  // Ensure aspect ratio is maintained
  auto scalar = std::fminf(out_width / (float) in_width, out_height / (float) in_height);
  auto out_width_f = in_width * scalar;
  auto out_height_f = in_height * scalar;
  // result is always positive
  auto offsetX_f = (out_width - out_width_f) / 2;
  auto offsetY_f = (out_height - out_height_f) / 2;
  sws.out_width = out_width_f;
  sws.out_height = out_height_f;
  sws.in_width = in_width;
  sws.in_height = in_height;
  sws.offsetX = offsetX_f;
  sws.offsetY = offsetY_f;
  auto width_i = 1.0f / sws.out_width;
  {
    // Sources alternate fragment/vertex so shader_type[x % 2] picks the right stage.
    const char *sources[] {
      SUNSHINE_SHADERS_DIR "/ConvertUV.frag",
      SUNSHINE_SHADERS_DIR "/ConvertUV.vert",
      SUNSHINE_SHADERS_DIR "/ConvertY.frag",
      SUNSHINE_SHADERS_DIR "/Scene.vert",
      SUNSHINE_SHADERS_DIR "/Scene.frag",
    };
    GLenum shader_type[2] {
      GL_FRAGMENT_SHADER,
      GL_VERTEX_SHADER,
    };
    constexpr auto count = sizeof(sources) / sizeof(const char *);
    util::Either<gl::shader_t, std::string> compiled_sources[count];
    bool error_flag = false;
    // Compile everything first so all errors are reported in one pass.
    for (int x = 0; x < count; ++x) {
      auto &compiled_source = compiled_sources[x];
      compiled_source = gl::shader_t::compile(file_handler::read_file(sources[x]), shader_type[x % 2]);
      gl_drain_errors;
      if (compiled_source.has_right()) {
        BOOST_LOG(error) << sources[x] << ": "sv << compiled_source.right();
        error_flag = true;
      }
    }
    if (error_flag) {
      return std::nullopt;
    }
    auto program = gl::program_t::link(compiled_sources[3].left(), compiled_sources[4].left());
    if (program.has_right()) {
      BOOST_LOG(error) << "GL linker: "sv << program.right();
      return std::nullopt;
    }
    // Cursor - shader
    sws.program[2] = std::move(program.left());
    program = gl::program_t::link(compiled_sources[1].left(), compiled_sources[0].left());
    if (program.has_right()) {
      BOOST_LOG(error) << "GL linker: "sv << program.right();
      return std::nullopt;
    }
    // UV - shader
    sws.program[1] = std::move(program.left());
    program = gl::program_t::link(compiled_sources[3].left(), compiled_sources[2].left());
    if (program.has_right()) {
      BOOST_LOG(error) << "GL linker: "sv << program.right();
      return std::nullopt;
    }
    // Y - shader
    sws.program[0] = std::move(program.left());
  }
  // The UV pass needs the reciprocal output width for texel stepping.
  auto loc_width_i = gl::ctx.GetUniformLocation(sws.program[1].handle(), "width_i");
  if (loc_width_i < 0) {
    BOOST_LOG(error) << "Couldn't find uniform [width_i]"sv;
    return std::nullopt;
  }
  gl::ctx.UseProgram(sws.program[1].handle());
  gl::ctx.Uniform1fv(loc_width_i, 1, &width_i);
  // Initialize color conversion with Rec.601 limited range; callers adjust
  // later via apply_colorspace().
  auto color_p = video::color_vectors_from_colorspace(video::colorspace_e::rec601, false);
  std::pair<const char *, std::string_view> members[] {
    std::make_pair("color_vec_y", util::view(color_p->color_vec_y)),
    std::make_pair("color_vec_u", util::view(color_p->color_vec_u)),
    std::make_pair("color_vec_v", util::view(color_p->color_vec_v)),
    std::make_pair("range_y", util::view(color_p->range_y)),
    std::make_pair("range_uv", util::view(color_p->range_uv)),
  };
  auto color_matrix = sws.program[0].uniform("ColorMatrix", members, sizeof(members) / sizeof(decltype(members[0])));
  if (!color_matrix) {
    return std::nullopt;
  }
  sws.color_matrix = std::move(*color_matrix);
  sws.tex = std::move(tex);
  // tex[0] doubles as the cursor compositing target.
  sws.cursor_framebuffer = gl::frame_buf_t::make(1);
  sws.cursor_framebuffer.bind(&sws.tex[0], &sws.tex[1]);
  sws.program[0].bind(sws.color_matrix);
  sws.program[1].bind(sws.color_matrix);
  // Standard alpha blending, used when compositing the cursor.
  gl::ctx.BlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
  gl_drain_errors;
  return sws;
}
int
sws_t::blank(gl::frame_buf_t &fb, int offsetX, int offsetY, int width, int height) {
  // Run a convert pass over the given viewport rectangle, temporarily
  // swapping it in place of this instance's viewport. Because f() swaps
  // (rather than assigns), calling it again via the fail guard restores
  // the original values even if convert() throws.
  auto f = [&]() {
    std::swap(offsetX, this->offsetX);
    std::swap(offsetY, this->offsetY);
    std::swap(width, this->out_width);
    std::swap(height, this->out_height);
  };
  f();
  auto fg = util::fail_guard(f);
  return convert(fb);
}
std::optional<sws_t>
sws_t::make(int in_width, int in_height, int out_width, int out_height, AVPixelFormat format) {
  // Convenience overload: allocate the backing textures with a bit depth
  // matching `format`, then delegate to the tex_t overload above.
  GLint gl_format;
  // Decide the bit depth format of the backing texture based the target frame format
  auto fmt_desc = av_pix_fmt_desc_get(format);
  switch (fmt_desc->comp[0].depth) {
    case 8:
      gl_format = GL_RGBA8;
      break;
    case 10:
      gl_format = GL_RGB10_A2;
      break;
    case 12:
      gl_format = GL_RGBA12;
      break;
    case 16:
      gl_format = GL_RGBA16;
      break;
    default:
      BOOST_LOG(error) << "Unsupported pixel format for EGL frame: "sv << (int) format;
      return std::nullopt;
  }
  auto tex = gl::tex_t::make(2);
  // Only tex[0] gets storage here; tex[1] (the cursor texture) is sized
  // on demand in load_vram() via TexImage2D.
  gl::ctx.BindTexture(GL_TEXTURE_2D, tex[0]);
  gl::ctx.TexStorage2D(GL_TEXTURE_2D, 1, gl_format, in_width, in_height);
  return make(in_width, in_height, out_width, out_height, std::move(tex));
}
void
sws_t::load_ram(platf::img_t &img) {
  // Upload a CPU-side BGRA image into the staging texture (tex[0]).
  loaded_texture = tex[0];
  gl::ctx.BindTexture(GL_TEXTURE_2D, loaded_texture);
  gl::ctx.TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, img.width, img.height, GL_BGRA, GL_UNSIGNED_BYTE, img.data);
}
void
sws_t::load_vram(img_descriptor_t &img, int offset_x, int offset_y, int texture) {
  // Prepare the source texture for conversion: optionally crop/copy the
  // relevant region into tex[0], then alpha-blend the cursor (img.data)
  // on top when one is present.
  // When only a sub-part of the image must be encoded...
  const bool copy = offset_x || offset_y || img.sd.width != in_width || img.sd.height != in_height;
  if (copy) {
    auto framebuf = gl::frame_buf_t::make(1);
    framebuf.bind(&texture, &texture + 1);
    loaded_texture = tex[0];
    framebuf.copy(0, loaded_texture, offset_x, offset_y, in_width, in_height);
  }
  else {
    loaded_texture = texture;
  }
  // img.data is non-null only when a cursor overlay must be composited.
  if (img.data) {
    GLenum attachment = GL_COLOR_ATTACHMENT0;
    gl::ctx.BindFramebuffer(GL_FRAMEBUFFER, cursor_framebuffer[0]);
    gl::ctx.UseProgram(program[2].handle());
    // When a copy has already been made...
    if (!copy) {
      // No crop copy happened, so first blit the full frame into tex[0];
      // we must not composite the cursor into the caller's texture.
      gl::ctx.BindTexture(GL_TEXTURE_2D, texture);
      gl::ctx.DrawBuffers(1, &attachment);
      gl::ctx.Viewport(0, 0, in_width, in_height);
      gl::ctx.DrawArrays(GL_TRIANGLES, 0, 3);
      loaded_texture = tex[0];
    }
    gl::ctx.BindTexture(GL_TEXTURE_2D, tex[1]);
    // Re-upload the cursor image only when it changed (tracked by serial).
    if (serial != img.serial) {
      serial = img.serial;
      gl::ctx.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, img.src_w, img.src_h, 0, GL_BGRA, GL_UNSIGNED_BYTE, img.data);
    }
    gl::ctx.Enable(GL_BLEND);
    gl::ctx.DrawBuffers(1, &attachment);
#ifndef NDEBUG
    auto status = gl::ctx.CheckFramebufferStatus(GL_FRAMEBUFFER);
    if (status != GL_FRAMEBUFFER_COMPLETE) {
      BOOST_LOG(error) << "Pass Cursor: CheckFramebufferStatus() --> [0x"sv << util::hex(status).to_string_view() << ']';
      return;
    }
#endif
    // Draw the cursor quad at its on-screen rectangle with alpha blending.
    gl::ctx.Viewport(img.x, img.y, img.width, img.height);
    gl::ctx.DrawArrays(GL_TRIANGLES, 0, 3);
    gl::ctx.Disable(GL_BLEND);
    gl::ctx.BindTexture(GL_TEXTURE_2D, 0);
    gl::ctx.BindFramebuffer(GL_FRAMEBUFFER, 0);
  }
}
int
sws_t::convert(gl::frame_buf_t &fb) {
  // Run the two conversion passes over `loaded_texture`:
  // pass 0 renders the Y plane into fb[0], pass 1 the UV plane into fb[1]
  // at half resolution (hence the /(x+1) viewport scaling for NV12).
  // Returns 0 on success, -1 on an incomplete framebuffer (debug builds).
  gl::ctx.BindTexture(GL_TEXTURE_2D, loaded_texture);
  GLenum attachments[] {
    GL_COLOR_ATTACHMENT0,
    GL_COLOR_ATTACHMENT1
  };
  for (int x = 0; x < sizeof(attachments) / sizeof(decltype(attachments[0])); ++x) {
    gl::ctx.BindFramebuffer(GL_FRAMEBUFFER, fb[x]);
    gl::ctx.DrawBuffers(1, &attachments[x]);
#ifndef NDEBUG
    auto status = gl::ctx.CheckFramebufferStatus(GL_FRAMEBUFFER);
    if (status != GL_FRAMEBUFFER_COMPLETE) {
      BOOST_LOG(error) << "Pass "sv << x << ": CheckFramebufferStatus() --> [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
#endif
    gl::ctx.UseProgram(program[x].handle());
    // Fullscreen-triangle draw; the vertex shader generates the 3 vertices.
    gl::ctx.Viewport(offsetX / (x + 1), offsetY / (x + 1), out_width / (x + 1), out_height / (x + 1));
    gl::ctx.DrawArrays(GL_TRIANGLES, 0, 3);
  }
  gl::ctx.BindTexture(GL_TEXTURE_2D, 0);
  gl::ctx.Flush();
  return 0;
}
} // namespace egl
void
free_frame(AVFrame *frame) {
  // Deleter for AVFrame (usable with smart pointers); safe to call with nullptr.
  av_frame_free(&frame);
}
| 30,626
|
C++
|
.cpp
| 803
| 32.369863
| 159
| 0.632276
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,001
|
misc.cpp
|
LizardByte_Sunshine/src/platform/linux/misc.cpp
|
/**
* @file src/platform/linux/misc.cpp
* @brief Miscellaneous definitions for Linux.
*/
// Required for in6_pktinfo with glibc headers
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif
// standard includes
#include <fstream>
#include <iostream>
// lib includes
#include <arpa/inet.h>
#include <boost/asio/ip/address.hpp>
#include <boost/asio/ip/host_name.hpp>
#include <boost/process/v1.hpp>
#include <dlfcn.h>
#include <fcntl.h>
#include <ifaddrs.h>
#include <netinet/udp.h>
#include <pwd.h>
#include <unistd.h>
// local includes
#include "graphics.h"
#include "misc.h"
#include "src/config.h"
#include "src/entry_handler.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "vaapi.h"
#ifdef __GNUC__
#define SUNSHINE_GNUC_EXTENSION __extension__
#else
#define SUNSHINE_GNUC_EXTENSION
#endif
using namespace std::literals;
namespace fs = std::filesystem;
namespace bp = boost::process;
window_system_e window_system;
namespace dyn {
void *
handle(const std::vector<const char *> &libs) {
  // Try each candidate soname in order; return the first that dlopen()s.
  for (auto lib : libs) {
    if (auto so = dlopen(lib, RTLD_LAZY | RTLD_LOCAL)) {
      return so;
    }
  }

  // None loaded: log the full candidate list in one message.
  std::stringstream ss;
  ss << "Couldn't find any of the following libraries: ["sv << libs.front();
  std::for_each(std::begin(libs) + 1, std::end(libs), [&](auto lib) {
    ss << ", "sv << lib;
  });
  ss << ']';

  BOOST_LOG(error) << ss.str();

  return nullptr;
}
int
load(void *handle, const std::vector<std::tuple<apiproc *, const char *>> &funcs, bool strict) {
  // Resolve each (pointer, symbol-name) pair from `handle` via dlsym().
  // With `strict`, any missing symbol fails the whole load (-1); otherwise
  // missing symbols are simply left null.
  int err = 0;
  for (auto &func : funcs) {
    TUPLE_2D_REF(fn, name, func);
    *fn = SUNSHINE_GNUC_EXTENSION(apiproc) dlsym(handle, name);
    if (!*fn && strict) {
      BOOST_LOG(error) << "Couldn't find function: "sv << name;
      err = -1;
    }
  }
  return err;
}
} // namespace dyn
namespace platf {
using ifaddr_t = util::safe_ptr<ifaddrs, freeifaddrs>;
ifaddr_t
get_ifaddrs() {
  // Fetch the interface-address list and wrap it in a RAII handle.
  ifaddrs *list { nullptr };
  getifaddrs(&list);
  return ifaddr_t { list };
}
/**
* @brief Performs migration if necessary, then returns the appdata directory.
* @details This is used for the log directory, so it cannot invoke Boost logging!
* @return The path of the appdata directory that should be used.
*/
fs::path
appdata() {
  // Resolve (and cache) Sunshine's config directory, migrating from the
  // legacy ~/.config/sunshine location when requested.
  // Precedence: $CONFIGURATION_DIRECTORY (systemd) > $XDG_CONFIG_HOME > ~/.config.
  static std::once_flag migration_flag;
  static fs::path config_path;
  // Ensure migration is only attempted once
  std::call_once(migration_flag, []() {
    bool found = false;
    bool migrate_config = true;
    const char *dir;
    const char *homedir;
    const char *migrate_envvar;
    // Get the home directory
    if ((homedir = getenv("HOME")) == nullptr || strlen(homedir) == 0) {
      // If HOME is empty or not set, use the current user's home directory
      homedir = getpwuid(geteuid())->pw_dir;
    }
    // May be set if running under a systemd service with the ConfigurationDirectory= option set.
    if ((dir = getenv("CONFIGURATION_DIRECTORY")) != nullptr && strlen(dir) > 0) {
      found = true;
      config_path = fs::path(dir) / "sunshine"sv;
    }
    // Otherwise, follow the XDG base directory specification:
    // https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
    if (!found && (dir = getenv("XDG_CONFIG_HOME")) != nullptr && strlen(dir) > 0) {
      found = true;
      config_path = fs::path(dir) / "sunshine"sv;
    }
    // As a last resort, use the home directory
    if (!found) {
      migrate_config = false;
      config_path = fs::path(homedir) / ".config/sunshine"sv;
    }
    // migrate from the old config location if necessary
    migrate_envvar = getenv("SUNSHINE_MIGRATE_CONFIG");
    if (migrate_config && found && migrate_envvar && strcmp(migrate_envvar, "1") == 0) {
      std::error_code ec;
      fs::path old_config_path = fs::path(homedir) / ".config/sunshine"sv;
      if (old_config_path != config_path && fs::exists(old_config_path, ec)) {
        if (!fs::exists(config_path, ec)) {
          std::cout << "Migrating config from "sv << old_config_path << " to "sv << config_path << std::endl;
          if (!ec) {
            // Create the new directory tree if it doesn't already exist
            fs::create_directories(config_path, ec);
          }
          if (!ec) {
            // Copy the old directory into the new location
            // NB: We use a copy instead of a move so that cross-volume migrations work
            fs::copy(old_config_path, config_path, fs::copy_options::recursive | fs::copy_options::copy_symlinks, ec);
          }
          if (!ec) {
            // If the copy was successful, delete the original directory
            fs::remove_all(old_config_path, ec);
            if (ec) {
              std::cerr << "Failed to clean up old config directory: " << ec.message() << std::endl;
              // This is not fatal. Next time we start, we'll warn the user to delete the old one.
              ec.clear();
            }
          }
          if (ec) {
            std::cerr << "Migration failed: " << ec.message() << std::endl;
            config_path = old_config_path;
          }
        }
        else {
          // We cannot use Boost logging because it hasn't been initialized yet!
          std::cerr << "Config exists in both "sv << old_config_path << " and "sv << config_path << ". Using "sv << config_path << " for config" << std::endl;
          std::cerr << "It is recommended to remove "sv << old_config_path << std::endl;
        }
      }
    }
  });
  return config_path;
}
std::string
from_sockaddr(const sockaddr *const ip_addr) {
  // Render an IPv4/IPv6 socket address as a numeric string (no port).
  // Unsupported address families yield an empty string.
  char data[INET6_ADDRSTRLEN] = {};

  switch (ip_addr->sa_family) {
    case AF_INET6:
      inet_ntop(AF_INET6, &((sockaddr_in6 *) ip_addr)->sin6_addr, data, INET6_ADDRSTRLEN);
      break;
    case AF_INET:
      inet_ntop(AF_INET, &((sockaddr_in *) ip_addr)->sin_addr, data, INET_ADDRSTRLEN);
      break;
  }

  return std::string { data };
}
std::pair<std::uint16_t, std::string>
from_sockaddr_ex(const sockaddr *const ip_addr) {
  // Extract both the printable address and the port from a sockaddr.
  // NOTE: the port is returned exactly as stored in the sockaddr
  // (network byte order); callers perform any ntohs() themselves.
  char buf[INET6_ADDRSTRLEN] = {};
  std::uint16_t port = 0;
  switch (ip_addr->sa_family) {
    case AF_INET6: {
      auto v6 = (const sockaddr_in6 *) ip_addr;
      inet_ntop(AF_INET6, &v6->sin6_addr, buf, INET6_ADDRSTRLEN);
      port = v6->sin6_port;
      break;
    }
    case AF_INET: {
      auto v4 = (const sockaddr_in *) ip_addr;
      inet_ntop(AF_INET, &v4->sin_addr, buf, INET_ADDRSTRLEN);
      port = v4->sin_port;
      break;
    }
    default:
      break;
  }
  return { port, std::string { buf } };
}
std::string
get_mac_address(const std::string_view &address) {
auto ifaddrs = get_ifaddrs();
for (auto pos = ifaddrs.get(); pos != nullptr; pos = pos->ifa_next) {
if (pos->ifa_addr && address == from_sockaddr(pos->ifa_addr)) {
std::ifstream mac_file("/sys/class/net/"s + pos->ifa_name + "/address");
if (mac_file.good()) {
std::string mac_address;
std::getline(mac_file, mac_address);
return mac_address;
}
}
}
BOOST_LOG(warning) << "Unable to find MAC address for "sv << address;
return "00:00:00:00:00:00"s;
}
bp::child
run_command(bool elevated, bool interactive, const std::string &cmd, boost::filesystem::path &working_dir, const bp::environment &env, FILE *file, std::error_code &ec, bp::group *group) {
  // Launch a command with stdin disconnected and stdout/stderr either
  // discarded or redirected to the provided FILE. An optional process
  // group lets callers signal the whole process tree later.
  // clang-format off
  if (group) {
    if (file) {
      return bp::child(cmd, env, bp::start_dir(working_dir), bp::std_in < bp::null, bp::std_out > file, bp::std_err > file, bp::limit_handles, ec, *group);
    }
    return bp::child(cmd, env, bp::start_dir(working_dir), bp::std_in < bp::null, bp::std_out > bp::null, bp::std_err > bp::null, bp::limit_handles, ec, *group);
  }
  if (file) {
    return bp::child(cmd, env, bp::start_dir(working_dir), bp::std_in < bp::null, bp::std_out > file, bp::std_err > file, bp::limit_handles, ec);
  }
  return bp::child(cmd, env, bp::start_dir(working_dir), bp::std_in < bp::null, bp::std_out > bp::null, bp::std_err > bp::null, bp::limit_handles, ec);
  // clang-format on
}
/**
 * @brief Open a url in the default web browser.
 * @param url The url to open.
 */
void
open_url(const std::string &url) {
  // Use the user's home directory as the working directory. HOME may be
  // unset (e.g. when running as a service); constructing a path from a
  // null char* is undefined behavior, so fall back to the current
  // directory in that case.
  const char *home = std::getenv("HOME");
  auto working_dir = home ? boost::filesystem::path(home) : boost::filesystem::path();
  std::string cmd = R"(xdg-open ")" + url + R"(")";

  boost::process::v1::environment _env = boost::this_process::environment();
  std::error_code ec;
  auto child = run_command(false, false, cmd, working_dir, _env, nullptr, ec, nullptr);
  if (ec) {
    BOOST_LOG(warning) << "Couldn't open url ["sv << url << "]: System: "sv << ec.message();
  }
  else {
    BOOST_LOG(info) << "Opened url ["sv << url << "]"sv;
    // Let the browser outlive us; we don't need its exit status.
    child.detach();
  }
}
void
adjust_thread_priority(thread_priority_e priority) {
  // Unimplemented on Linux; thread priority adjustment is a no-op here.
}
void
streaming_will_start() {
  // Nothing to do on Linux before a stream starts.
}
void
streaming_will_stop() {
  // Nothing to do on Linux after a stream stops.
}
// atexit() handler that replaces the current process image with a fresh
// copy of the running executable, preserving the original argv.
void
restart_on_exit() {
  char executable[PATH_MAX];
  ssize_t len = readlink("/proc/self/exe", executable, PATH_MAX - 1);
  if (len == -1) {
    BOOST_LOG(fatal) << "readlink() failed: "sv << errno;
    return;
  }
  // readlink() does not NUL-terminate the result
  executable[len] = '\0';

  // ASIO doesn't use O_CLOEXEC, so we have to close all fds ourselves
  int openmax = (int) sysconf(_SC_OPEN_MAX);
  for (int fd = STDERR_FILENO + 1; fd < openmax; fd++) {
    close(fd);
  }

  // Re-exec ourselves with the same arguments
  if (execv(executable, lifetime::get_argv()) < 0) {
    BOOST_LOG(fatal) << "execv() failed: "sv << errno;
    return;
  }
}
// Restart Sunshine in-place. The actual re-exec happens in
// restart_on_exit() after a normal, graceful shutdown completes.
void
restart() {
  // Gracefully clean up and restart ourselves instead of exiting
  atexit(restart_on_exit);
  lifetime::exit_sunshine(0, true);
}
int
set_env(const std::string &name, const std::string &value) {
  // Always overwrite an existing value; returns 0 on success per setenv(3).
  constexpr int overwrite = 1;
  return setenv(name.c_str(), value.c_str(), overwrite);
}
int
unset_env(const std::string &name) {
  // Remove the variable from the process environment; 0 on success.
  return unsetenv(name.c_str());
}
bool
request_process_group_exit(std::uintptr_t native_handle) {
  // Ask every process in the group to terminate. A group that no longer
  // exists (ESRCH) counts as success, since the processes are already gone.
  auto pgid = (pid_t) native_handle;
  if (kill(-pgid, SIGTERM) != 0 && errno != ESRCH) {
    BOOST_LOG(warning) << "Unable to send SIGTERM to process group ["sv << native_handle << "]: "sv << errno;
    return false;
  }
  BOOST_LOG(debug) << "Successfully sent SIGTERM to process group: "sv << native_handle;
  return true;
}
bool
process_group_running(std::uintptr_t native_handle) {
  // waitpid() on the negated PGID returns -1 (with ECHILD) once every child
  // in the group has been reaped; any non-negative result (0 = children
  // still running, >0 = one was just reaped) means the group is still alive.
  return waitpid(-((pid_t) native_handle), nullptr, WNOHANG) >= 0;
}
struct sockaddr_in
to_sockaddr(boost::asio::ip::address_v4 address, uint16_t port) {
  // Translate a Boost.Asio IPv4 address plus a host-order port into a
  // zero-initialized BSD sockaddr_in.
  struct sockaddr_in saddr_v4 = {};
  saddr_v4.sin_family = AF_INET;
  saddr_v4.sin_port = htons(port);
  const auto bytes = address.to_bytes();
  memcpy(&saddr_v4.sin_addr, bytes.data(), sizeof(saddr_v4.sin_addr));
  return saddr_v4;
}
struct sockaddr_in6
to_sockaddr(boost::asio::ip::address_v6 address, uint16_t port) {
  // Translate a Boost.Asio IPv6 address plus a host-order port into a
  // zero-initialized BSD sockaddr_in6, preserving the scope id for
  // link-local addresses.
  struct sockaddr_in6 saddr_v6 = {};
  saddr_v6.sin6_family = AF_INET6;
  saddr_v6.sin6_port = htons(port);
  saddr_v6.sin6_scope_id = address.scope_id();
  const auto bytes = address.to_bytes();
  memcpy(&saddr_v6.sin6_addr, bytes.data(), sizeof(saddr_v6.sin6_addr));
  return saddr_v6;
}
/**
 * @brief Send a batch of equally-sized UDP datagrams with as few syscalls as possible.
 *
 * Tries UDP generic segmentation offload (UDP_SEGMENT) first when the kernel
 * header exposes it; if the first GSO send fails, falls back to sendmmsg().
 * @param send_info Describes the socket, source/target addresses, optional
 *                  per-block headers, and the payload buffers to send.
 * @return `true` if all blocks were sent.
 */
bool
send_batch(batched_send_info_t &send_info) {
  auto sockfd = (int) send_info.native_socket;
  struct msghdr msg = {};

  // Convert the target address into a sockaddr
  struct sockaddr_in taddr_v4 = {};
  struct sockaddr_in6 taddr_v6 = {};
  if (send_info.target_address.is_v6()) {
    taddr_v6 = to_sockaddr(send_info.target_address.to_v6(), send_info.target_port);
    msg.msg_name = (struct sockaddr *) &taddr_v6;
    msg.msg_namelen = sizeof(taddr_v6);
  }
  else {
    taddr_v4 = to_sockaddr(send_info.target_address.to_v4(), send_info.target_port);
    msg.msg_name = (struct sockaddr *) &taddr_v4;
    msg.msg_namelen = sizeof(taddr_v4);
  }

  // Control buffer sized for the PKTINFO option plus an optional
  // UDP_SEGMENT (uint16_t) option appended later.
  union {
    char buf[CMSG_SPACE(sizeof(uint16_t)) +
             std::max(CMSG_SPACE(sizeof(struct in_pktinfo)), CMSG_SPACE(sizeof(struct in6_pktinfo)))];
    struct cmsghdr alignment;
  } cmbuf = {};  // Must be zeroed for CMSG_NXTHDR()
  socklen_t cmbuflen = 0;

  msg.msg_control = cmbuf.buf;
  msg.msg_controllen = sizeof(cmbuf.buf);

  // The PKTINFO option will always be first, then we will conditionally
  // append the UDP_SEGMENT option next if applicable.
  auto pktinfo_cm = CMSG_FIRSTHDR(&msg);
  if (send_info.source_address.is_v6()) {
    struct in6_pktinfo pktInfo;
    struct sockaddr_in6 saddr_v6 = to_sockaddr(send_info.source_address.to_v6(), 0);
    pktInfo.ipi6_addr = saddr_v6.sin6_addr;
    pktInfo.ipi6_ifindex = 0;
    cmbuflen += CMSG_SPACE(sizeof(pktInfo));
    pktinfo_cm->cmsg_level = IPPROTO_IPV6;
    pktinfo_cm->cmsg_type = IPV6_PKTINFO;
    pktinfo_cm->cmsg_len = CMSG_LEN(sizeof(pktInfo));
    memcpy(CMSG_DATA(pktinfo_cm), &pktInfo, sizeof(pktInfo));
  }
  else {
    struct in_pktinfo pktInfo;
    struct sockaddr_in saddr_v4 = to_sockaddr(send_info.source_address.to_v4(), 0);
    pktInfo.ipi_spec_dst = saddr_v4.sin_addr;
    pktInfo.ipi_ifindex = 0;
    cmbuflen += CMSG_SPACE(sizeof(pktInfo));
    pktinfo_cm->cmsg_level = IPPROTO_IP;
    pktinfo_cm->cmsg_type = IP_PKTINFO;
    pktinfo_cm->cmsg_len = CMSG_LEN(sizeof(pktInfo));
    memcpy(CMSG_DATA(pktinfo_cm), &pktInfo, sizeof(pktInfo));
  }

  // Worst-case iovec count per message: one per payload buffer,
  // plus one more when interleaved headers are present.
  auto const max_iovs_per_msg = send_info.payload_buffers.size() + (send_info.headers ? 1 : 0);

#ifdef UDP_SEGMENT
  {
    // UDP GSO on Linux currently only supports sending 64K or 64 segments at a time
    size_t seg_index = 0;
    const size_t seg_max = 65536 / 1500;
    struct iovec iovs[(send_info.headers ? std::min(seg_max, send_info.block_count) : 1) * max_iovs_per_msg] = {};
    auto msg_size = send_info.header_size + send_info.payload_size;
    while (seg_index < send_info.block_count) {
      int iovlen = 0;
      auto segs_in_batch = std::min(send_info.block_count - seg_index, seg_max);
      if (send_info.headers) {
        // Interleave iovs for headers and payloads
        for (auto i = 0; i < segs_in_batch; i++) {
          iovs[iovlen].iov_base = (void *) &send_info.headers[(send_info.block_offset + seg_index + i) * send_info.header_size];
          iovs[iovlen].iov_len = send_info.header_size;
          iovlen++;
          auto payload_desc = send_info.buffer_for_payload_offset((send_info.block_offset + seg_index + i) * send_info.payload_size);
          iovs[iovlen].iov_base = (void *) payload_desc.buffer;
          iovs[iovlen].iov_len = send_info.payload_size;
          iovlen++;
        }
      }
      else {
        // Translate buffer descriptors into iovs
        auto payload_offset = (send_info.block_offset + seg_index) * send_info.payload_size;
        auto payload_length = payload_offset + (segs_in_batch * send_info.payload_size);
        while (payload_offset < payload_length) {
          auto payload_desc = send_info.buffer_for_payload_offset(payload_offset);
          iovs[iovlen].iov_base = (void *) payload_desc.buffer;
          iovs[iovlen].iov_len = std::min(payload_desc.size, payload_length - payload_offset);
          payload_offset += iovs[iovlen].iov_len;
          iovlen++;
        }
      }

      msg.msg_iov = iovs;
      msg.msg_iovlen = iovlen;

      // We should not use GSO if the data is <= one full block size
      if (segs_in_batch > 1) {
        msg.msg_controllen = cmbuflen + CMSG_SPACE(sizeof(uint16_t));
        // Enable GSO to perform segmentation of our buffer for us
        auto cm = CMSG_NXTHDR(&msg, pktinfo_cm);
        cm->cmsg_level = SOL_UDP;
        cm->cmsg_type = UDP_SEGMENT;
        cm->cmsg_len = CMSG_LEN(sizeof(uint16_t));
        *((uint16_t *) CMSG_DATA(cm)) = msg_size;
      }
      else {
        msg.msg_controllen = cmbuflen;
      }

      // This will fail if GSO is not available, so we will fall back to non-GSO if
      // it's the first sendmsg() call. On subsequent calls, we will treat errors as
      // actual failures and return to the caller.
      auto bytes_sent = sendmsg(sockfd, &msg, 0);
      if (bytes_sent < 0) {
        // If there's no send buffer space, wait for some to be available
        if (errno == EAGAIN) {
          struct pollfd pfd;
          pfd.fd = sockfd;
          pfd.events = POLLOUT;
          if (poll(&pfd, 1, -1) != 1) {
            BOOST_LOG(warning) << "poll() failed: "sv << errno;
            break;
          }
          // Try to send again
          continue;
        }
        BOOST_LOG(verbose) << "sendmsg() failed: "sv << errno;
        break;
      }

      seg_index += bytes_sent / msg_size;
    }

    // If we sent something, return the status and don't fall back to the non-GSO path.
    if (seg_index != 0) {
      return seg_index >= send_info.block_count;
    }
  }
#endif

  {
    // If GSO is not supported, use sendmmsg() instead.
    struct mmsghdr msgs[send_info.block_count] = {};
    struct iovec iovs[send_info.block_count * (send_info.headers ? 2 : 1)] = {};
    int iov_idx = 0;
    for (size_t i = 0; i < send_info.block_count; i++) {
      msgs[i].msg_hdr.msg_iov = &iovs[iov_idx];
      msgs[i].msg_hdr.msg_iovlen = send_info.headers ? 2 : 1;
      if (send_info.headers) {
        iovs[iov_idx].iov_base = (void *) &send_info.headers[(send_info.block_offset + i) * send_info.header_size];
        iovs[iov_idx].iov_len = send_info.header_size;
        iov_idx++;
      }
      auto payload_desc = send_info.buffer_for_payload_offset((send_info.block_offset + i) * send_info.payload_size);
      iovs[iov_idx].iov_base = (void *) payload_desc.buffer;
      iovs[iov_idx].iov_len = send_info.payload_size;
      iov_idx++;
      // All messages share the same destination and control data.
      msgs[i].msg_hdr.msg_name = msg.msg_name;
      msgs[i].msg_hdr.msg_namelen = msg.msg_namelen;
      msgs[i].msg_hdr.msg_control = cmbuf.buf;
      msgs[i].msg_hdr.msg_controllen = cmbuflen;
    }

    // Call sendmmsg() until all messages are sent
    size_t blocks_sent = 0;
    while (blocks_sent < send_info.block_count) {
      int msgs_sent = sendmmsg(sockfd, &msgs[blocks_sent], send_info.block_count - blocks_sent, 0);
      if (msgs_sent < 0) {
        // If there's no send buffer space, wait for some to be available
        if (errno == EAGAIN) {
          struct pollfd pfd;
          pfd.fd = sockfd;
          pfd.events = POLLOUT;
          if (poll(&pfd, 1, -1) != 1) {
            BOOST_LOG(warning) << "poll() failed: "sv << errno;
            break;
          }
          // Try to send again
          continue;
        }
        BOOST_LOG(warning) << "sendmmsg() failed: "sv << errno;
        return false;
      }
      blocks_sent += msgs_sent;
    }

    return true;
  }
}
/**
 * @brief Send a single UDP datagram (optional header + payload) via sendmsg().
 * @param send_info Describes the socket, source/target addresses, and the
 *                  header/payload buffers to send.
 * @return `true` if the datagram was sent.
 */
bool
send(send_info_t &send_info) {
  auto sockfd = (int) send_info.native_socket;
  struct msghdr msg = {};

  // Convert the target address into a sockaddr
  struct sockaddr_in taddr_v4 = {};
  struct sockaddr_in6 taddr_v6 = {};
  if (send_info.target_address.is_v6()) {
    taddr_v6 = to_sockaddr(send_info.target_address.to_v6(), send_info.target_port);
    msg.msg_name = (struct sockaddr *) &taddr_v6;
    msg.msg_namelen = sizeof(taddr_v6);
  }
  else {
    taddr_v4 = to_sockaddr(send_info.target_address.to_v4(), send_info.target_port);
    msg.msg_name = (struct sockaddr *) &taddr_v4;
    msg.msg_namelen = sizeof(taddr_v4);
  }

  // Control buffer for a single PKTINFO option. Unlike send_batch(), only
  // CMSG_FIRSTHDR is used here, so the buffer need not be pre-zeroed.
  union {
    char buf[std::max(CMSG_SPACE(sizeof(struct in_pktinfo)), CMSG_SPACE(sizeof(struct in6_pktinfo)))];
    struct cmsghdr alignment;
  } cmbuf;
  socklen_t cmbuflen = 0;

  msg.msg_control = cmbuf.buf;
  msg.msg_controllen = sizeof(cmbuf.buf);

  // Pin the source address so replies and routing use the expected interface.
  auto pktinfo_cm = CMSG_FIRSTHDR(&msg);
  if (send_info.source_address.is_v6()) {
    struct in6_pktinfo pktInfo;
    struct sockaddr_in6 saddr_v6 = to_sockaddr(send_info.source_address.to_v6(), 0);
    pktInfo.ipi6_addr = saddr_v6.sin6_addr;
    pktInfo.ipi6_ifindex = 0;
    cmbuflen += CMSG_SPACE(sizeof(pktInfo));
    pktinfo_cm->cmsg_level = IPPROTO_IPV6;
    pktinfo_cm->cmsg_type = IPV6_PKTINFO;
    pktinfo_cm->cmsg_len = CMSG_LEN(sizeof(pktInfo));
    memcpy(CMSG_DATA(pktinfo_cm), &pktInfo, sizeof(pktInfo));
  }
  else {
    struct in_pktinfo pktInfo;
    struct sockaddr_in saddr_v4 = to_sockaddr(send_info.source_address.to_v4(), 0);
    pktInfo.ipi_spec_dst = saddr_v4.sin_addr;
    pktInfo.ipi_ifindex = 0;
    cmbuflen += CMSG_SPACE(sizeof(pktInfo));
    pktinfo_cm->cmsg_level = IPPROTO_IP;
    pktinfo_cm->cmsg_type = IP_PKTINFO;
    pktinfo_cm->cmsg_len = CMSG_LEN(sizeof(pktInfo));
    memcpy(CMSG_DATA(pktinfo_cm), &pktInfo, sizeof(pktInfo));
  }

  // Gather the optional header and the payload into one datagram.
  struct iovec iovs[2] = {};
  int iovlen = 0;
  if (send_info.header) {
    iovs[iovlen].iov_base = (void *) send_info.header;
    iovs[iovlen].iov_len = send_info.header_size;
    iovlen++;
  }
  iovs[iovlen].iov_base = (void *) send_info.payload;
  iovs[iovlen].iov_len = send_info.payload_size;
  iovlen++;

  msg.msg_iov = iovs;
  msg.msg_iovlen = iovlen;
  msg.msg_controllen = cmbuflen;

  auto bytes_sent = sendmsg(sockfd, &msg, 0);

  // If there's no send buffer space, wait for some to be available
  while (bytes_sent < 0 && errno == EAGAIN) {
    struct pollfd pfd;
    pfd.fd = sockfd;
    pfd.events = POLLOUT;
    if (poll(&pfd, 1, -1) != 1) {
      BOOST_LOG(warning) << "poll() failed: "sv << errno;
      break;
    }
    // Try to send again
    bytes_sent = sendmsg(sockfd, &msg, 0);
  }

  if (bytes_sent < 0) {
    BOOST_LOG(warning) << "sendmsg() failed: "sv << errno;
    return false;
  }

  return true;
}
// We can't track QoS state separately for each destination on this OS,
// so we keep a ref count to only disable QoS options when all clients
// are disconnected.
static std::atomic<int> qos_ref_count = 0;

// RAII handle returned by enable_socket_qos(). The constructor records the
// socket options to restore; the destructor writes them back once the last
// outstanding handle is destroyed.
class qos_t: public deinit_t {
public:
  // `options` holds (level, option, reset value) tuples to re-apply on teardown.
  qos_t(int sockfd, std::vector<std::tuple<int, int, int>> options):
    sockfd(sockfd), options(options) {
    qos_ref_count++;
  }

  virtual ~qos_t() {
    // Only the final handle actually undoes the socket options.
    if (--qos_ref_count == 0) {
      for (const auto &tuple : options) {
        auto reset_val = std::get<2>(tuple);
        if (setsockopt(sockfd, std::get<0>(tuple), std::get<1>(tuple), &reset_val, sizeof(reset_val)) < 0) {
          BOOST_LOG(warning) << "Failed to reset option: "sv << errno;
        }
      }
    }
  }

private:
  int sockfd;
  std::vector<std::tuple<int, int, int>> options;
};
/**
 * @brief Enables QoS on the given socket for traffic to the specified destination.
 * @param native_socket The native socket handle.
 * @param address The destination address for traffic sent on this socket.
 * @param port The destination port for traffic sent on this socket.
 * @param data_type The type of traffic sent on this socket.
 * @param dscp_tagging Specifies whether to enable DSCP tagging on outgoing traffic.
 * @return A handle that restores the socket's original options when destroyed
 *         (ref-counted; see qos_t above).
 */
std::unique_ptr<deinit_t>
enable_socket_qos(uintptr_t native_socket, boost::asio::ip::address &address, uint16_t port, qos_data_type_e data_type, bool dscp_tagging) {
  int sockfd = (int) native_socket;
  // Collects (level, option, reset value) tuples for teardown.
  std::vector<std::tuple<int, int, int>> reset_options;

  if (dscp_tagging) {
    int level;
    int option;

    // With dual-stack sockets, Linux uses IPV6_TCLASS for IPv6 traffic
    // and IP_TOS for IPv4 traffic.
    if (address.is_v6() && !address.to_v6().is_v4_mapped()) {
      level = SOL_IPV6;
      option = IPV6_TCLASS;
    }
    else {
      level = SOL_IP;
      option = IP_TOS;
    }

    // The specific DSCP values here are chosen to be consistent with Windows,
    // except that we use CS6 instead of CS7 for audio traffic.
    int dscp = 0;
    switch (data_type) {
      case qos_data_type_e::video:
        dscp = 40;
        break;
      case qos_data_type_e::audio:
        dscp = 48;
        break;
      default:
        BOOST_LOG(error) << "Unknown traffic type: "sv << (int) data_type;
        break;
    }

    if (dscp) {
      // Shift to put the DSCP value in the correct position in the TOS field
      dscp <<= 2;
      if (setsockopt(sockfd, level, option, &dscp, sizeof(dscp)) == 0) {
        // Reset TOS to -1 when QoS is disabled
        reset_options.emplace_back(std::make_tuple(level, option, -1));
      }
      else {
        BOOST_LOG(error) << "Failed to set TOS/TCLASS: "sv << errno;
      }
    }
  }

  // We can use SO_PRIORITY to set outgoing traffic priority without DSCP tagging.
  //
  // NB: We set this after IP_TOS/IPV6_TCLASS since setting TOS value seems to
  // reset SO_PRIORITY back to 0.
  //
  // 6 is the highest priority that can be used without SYS_CAP_ADMIN.
  int priority = data_type == qos_data_type_e::audio ? 6 : 5;
  if (setsockopt(sockfd, SOL_SOCKET, SO_PRIORITY, &priority, sizeof(priority)) == 0) {
    // Reset SO_PRIORITY to 0 when QoS is disabled
    reset_options.emplace_back(std::make_tuple(SOL_SOCKET, SO_PRIORITY, 0));
  }
  else {
    BOOST_LOG(error) << "Failed to set SO_PRIORITY: "sv << errno;
  }

  return std::make_unique<qos_t>(sockfd, reset_options);
}
std::string
get_host_name() {
try {
return boost::asio::ip::host_name();
}
catch (boost::system::system_error &err) {
BOOST_LOG(error) << "Failed to get hostname: "sv << err.what();
return "Sunshine"s;
}
}
namespace source {
  // Capture backends in priority order; only the flags whose build option
  // was enabled are compiled in, so the enum values are dense.
  enum source_e : std::size_t {
#ifdef SUNSHINE_BUILD_CUDA
    NVFBC,  ///< NvFBC
#endif
#ifdef SUNSHINE_BUILD_WAYLAND
    WAYLAND,  ///< Wayland
#endif
#ifdef SUNSHINE_BUILD_DRM
    KMS,  ///< KMS
#endif
#ifdef SUNSHINE_BUILD_X11
    X11,  ///< X11
#endif
    MAX_FLAGS  ///< The maximum number of flags
  };
}  // namespace source

// One bit per capture backend; set during init() for each backend that
// verified successfully.
static std::bitset<source::MAX_FLAGS> sources;
#ifdef SUNSHINE_BUILD_CUDA
// Implemented in the CUDA capture backend (cuda.cpp).
std::vector<std::string>
nvfbc_display_names();
std::shared_ptr<display_t>
nvfbc_display(mem_type_e hwdevice_type, const std::string &display_name, const video::config_t &config);

// NvFBC is usable if it can enumerate at least one display.
bool
verify_nvfbc() {
  return !nvfbc_display_names().empty();
}
#endif
#ifdef SUNSHINE_BUILD_WAYLAND
// Implemented in the Wayland capture backend.
std::vector<std::string>
wl_display_names();
std::shared_ptr<display_t>
wl_display(mem_type_e hwdevice_type, const std::string &display_name, const video::config_t &config);

// Wayland capture requires a Wayland session and at least one output.
bool
verify_wl() {
  return window_system == window_system_e::WAYLAND && !wl_display_names().empty();
}
#endif
#ifdef SUNSHINE_BUILD_DRM
// Implemented in the KMS/DRM capture backend.
std::vector<std::string>
kms_display_names(mem_type_e hwdevice_type);
std::shared_ptr<display_t>
kms_display(mem_type_e hwdevice_type, const std::string &display_name, const video::config_t &config);

// KMS is usable if it can enumerate at least one display for any device type.
bool
verify_kms() {
  return !kms_display_names(mem_type_e::unknown).empty();
}
#endif
#ifdef SUNSHINE_BUILD_X11
// Implemented in the X11 capture backend (x11grab.cpp).
std::vector<std::string>
x11_display_names();
std::shared_ptr<display_t>
x11_display(mem_type_e hwdevice_type, const std::string &display_name, const video::config_t &config);

// X11 capture requires an X session and at least one display.
bool
verify_x11() {
  return window_system == window_system_e::X11 && !x11_display_names().empty();
}
#endif
/**
 * @brief Enumerate display names from the highest-priority capture backend
 *        enabled during init().
 * @param hwdevice_type The memory type of the encoding device.
 * @return Display names from the selected backend, or an empty list.
 */
std::vector<std::string>
display_names(mem_type_e hwdevice_type) {
#ifdef SUNSHINE_BUILD_CUDA
  // display using NvFBC only supports mem_type_e::cuda
  if (sources[source::NVFBC] && hwdevice_type == mem_type_e::cuda) return nvfbc_display_names();
#endif
#ifdef SUNSHINE_BUILD_WAYLAND
  if (sources[source::WAYLAND]) return wl_display_names();
#endif
#ifdef SUNSHINE_BUILD_DRM
  if (sources[source::KMS]) return kms_display_names(hwdevice_type);
#endif
#ifdef SUNSHINE_BUILD_X11
  if (sources[source::X11]) return x11_display_names();
#endif
  return {};
}
/**
 * @brief Returns if GPUs/drivers have changed since the last call to this function.
 * @return `true` if a change has occurred or if it is unknown whether a change occurred.
 */
bool
needs_encoder_reenumeration() {
  // We don't track GPU state, so we will always reenumerate. Fortunately, it is fast on Linux.
  // Always reporting a possible change makes callers re-probe the encoders.
  return true;
}
/**
 * @brief Create a display capture instance from the highest-priority backend
 *        enabled during init().
 * @param hwdevice_type The memory type of the encoding device.
 * @param display_name The name of the display to capture.
 * @param config The video configuration for the stream.
 * @return The capture instance, or nullptr if no backend is active.
 */
std::shared_ptr<display_t>
display(mem_type_e hwdevice_type, const std::string &display_name, const video::config_t &config) {
#ifdef SUNSHINE_BUILD_CUDA
  // NvFBC capture is only compatible with CUDA device memory.
  if (sources[source::NVFBC] && hwdevice_type == mem_type_e::cuda) {
    BOOST_LOG(info) << "Screencasting with NvFBC"sv;
    return nvfbc_display(hwdevice_type, display_name, config);
  }
#endif
#ifdef SUNSHINE_BUILD_WAYLAND
  if (sources[source::WAYLAND]) {
    BOOST_LOG(info) << "Screencasting with Wayland's protocol"sv;
    return wl_display(hwdevice_type, display_name, config);
  }
#endif
#ifdef SUNSHINE_BUILD_DRM
  if (sources[source::KMS]) {
    BOOST_LOG(info) << "Screencasting with KMS"sv;
    return kms_display(hwdevice_type, display_name, config);
  }
#endif
#ifdef SUNSHINE_BUILD_X11
  if (sources[source::X11]) {
    BOOST_LOG(info) << "Screencasting with X11"sv;
    return x11_display(hwdevice_type, display_name, config);
  }
#endif
  return nullptr;
}
/**
 * @brief Initialize the Linux platform layer: detect the window system and
 *        enable the first capture backend that verifies successfully.
 * @return A deinit handle, or nullptr if no capture method could be initialized.
 */
std::unique_ptr<deinit_t>
init() {
  // enable low latency mode for AMD
  // https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30039
  set_env("AMD_DEBUG", "lowlatencyenc");

  // These are allowed to fail.
  gbm::init();

  // Pick the window system from the session environment variables.
  window_system = window_system_e::NONE;
#ifdef SUNSHINE_BUILD_WAYLAND
  if (std::getenv("WAYLAND_DISPLAY")) {
    window_system = window_system_e::WAYLAND;
  }
#endif
#if defined(SUNSHINE_BUILD_X11) || defined(SUNSHINE_BUILD_CUDA)
  if (std::getenv("DISPLAY") && window_system != window_system_e::WAYLAND) {
    if (std::getenv("WAYLAND_DISPLAY")) {
      BOOST_LOG(warning) << "Wayland detected, yet sunshine will use X11 for screencasting, screencasting will only work on XWayland applications"sv;
    }
    window_system = window_system_e::X11;
  }
#endif
  // Backends are tried in priority order; an explicit config::video.capture
  // value restricts the choice to that single backend.
#ifdef SUNSHINE_BUILD_CUDA
  if ((config::video.capture.empty() && sources.none()) || config::video.capture == "nvfbc") {
    if (verify_nvfbc()) {
      sources[source::NVFBC] = true;
    }
  }
#endif
#ifdef SUNSHINE_BUILD_WAYLAND
  if ((config::video.capture.empty() && sources.none()) || config::video.capture == "wlr") {
    if (verify_wl()) {
      sources[source::WAYLAND] = true;
    }
  }
#endif
#ifdef SUNSHINE_BUILD_DRM
  if ((config::video.capture.empty() && sources.none()) || config::video.capture == "kms") {
    if (verify_kms()) {
      sources[source::KMS] = true;
    }
  }
#endif
#ifdef SUNSHINE_BUILD_X11
  // We enumerate this capture backend regardless of other suitable sources,
  // since it may be needed as a NvFBC fallback for software encoding on X11.
  if (config::video.capture.empty() || config::video.capture == "x11") {
    if (verify_x11()) {
      sources[source::X11] = true;
    }
  }
#endif

  if (sources.none()) {
    BOOST_LOG(error) << "Unable to initialize capture method"sv;
    return nullptr;
  }

  // EGL is optional; capture backends that need it will fail on their own.
  if (!gladLoaderLoadEGL(EGL_NO_DISPLAY) || !eglGetPlatformDisplay) {
    BOOST_LOG(warning) << "Couldn't load EGL library"sv;
  }

  return std::make_unique<deinit_t>();
}
// Linux implementation of high_precision_timer; delegates directly to
// std::this_thread::sleep_for.
class linux_high_precision_timer: public high_precision_timer {
public:
  void
  sleep_for(const std::chrono::nanoseconds &duration) override {
    std::this_thread::sleep_for(duration);
  }

  // Always reports as usable.
  operator bool() override {
    return true;
  }
};
// Factory for the platform high-precision timer used by the streaming pacing code.
std::unique_ptr<high_precision_timer>
create_high_precision_timer() {
  return std::make_unique<linux_high_precision_timer>();
}
} // namespace platf
| 32,388
|
C++
|
.cpp
| 868
| 31.095622
| 189
| 0.62013
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,002
|
x11grab.cpp
|
LizardByte_Sunshine/src/platform/linux/x11grab.cpp
|
/**
* @file src/platform/linux/x11grab.cpp
* @brief Definitions for x11 capture.
*/
#include "src/platform/common.h"
#include <fstream>
#include <thread>
#include <X11/X.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/extensions/Xfixes.h>
#include <X11/extensions/Xrandr.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <xcb/shm.h>
#include <xcb/xfixes.h>
#include "src/config.h"
#include "src/globals.h"
#include "src/logging.h"
#include "src/task_pool.h"
#include "src/video.h"
#include "cuda.h"
#include "graphics.h"
#include "misc.h"
#include "vaapi.h"
#include "x11grab.h"
using namespace std::literals;
namespace platf {
int
load_xcb();
int
load_x11();
namespace x11 {
  // Declares a typedef `<name>_fn` matching the function's signature plus a
  // static pointer of that type. The pointers are resolved at runtime by the
  // init() routines below (via dyn::handle/dyn::load), so Sunshine carries no
  // link-time dependency on the X11 libraries.
#define _FN(x, ret, args) \
  typedef ret(*x##_fn) args; \
  static x##_fn x

  _FN(GetImage, XImage *,
    (
      Display * display,
      Drawable d,
      int x, int y,
      unsigned int width, unsigned int height,
      unsigned long plane_mask,
      int format));

  _FN(OpenDisplay, Display *, (_Xconst char *display_name));
  _FN(GetWindowAttributes, Status,
    (
      Display * display,
      Window w,
      XWindowAttributes *window_attributes_return));

  _FN(CloseDisplay, int, (Display * display));
  _FN(Free, int, (void *data));
  _FN(InitThreads, Status, (void) );

  // XRandR entry points, loaded lazily from libXrandr.
  namespace rr {
    _FN(GetScreenResources, XRRScreenResources *, (Display * dpy, Window window));
    _FN(GetOutputInfo, XRROutputInfo *, (Display * dpy, XRRScreenResources *resources, RROutput output));
    _FN(GetCrtcInfo, XRRCrtcInfo *, (Display * dpy, XRRScreenResources *resources, RRCrtc crtc));
    _FN(FreeScreenResources, void, (XRRScreenResources * resources));
    _FN(FreeOutputInfo, void, (XRROutputInfo * outputInfo));
    _FN(FreeCrtcInfo, void, (XRRCrtcInfo * crtcInfo));

    // Loads libXrandr and resolves the symbols above.
    // Idempotent; returns 0 on success, -1 on failure.
    static int
    init() {
      static void *handle { nullptr };
      static bool funcs_loaded = false;

      if (funcs_loaded) return 0;

      if (!handle) {
        handle = dyn::handle({ "libXrandr.so.2", "libXrandr.so" });
        if (!handle) {
          return -1;
        }
      }

      std::vector<std::tuple<dyn::apiproc *, const char *>> funcs {
        { (dyn::apiproc *) &GetScreenResources, "XRRGetScreenResources" },
        { (dyn::apiproc *) &GetOutputInfo, "XRRGetOutputInfo" },
        { (dyn::apiproc *) &GetCrtcInfo, "XRRGetCrtcInfo" },
        { (dyn::apiproc *) &FreeScreenResources, "XRRFreeScreenResources" },
        { (dyn::apiproc *) &FreeOutputInfo, "XRRFreeOutputInfo" },
        { (dyn::apiproc *) &FreeCrtcInfo, "XRRFreeCrtcInfo" },
      };

      if (dyn::load(handle, funcs)) {
        return -1;
      }

      funcs_loaded = true;
      return 0;
    }
  }  // namespace rr

  // XFixes entry points (cursor image capture), loaded lazily from libXfixes.
  namespace fix {
    _FN(GetCursorImage, XFixesCursorImage *, (Display * dpy));

    // Loads libXfixes and resolves XFixesGetCursorImage.
    // Idempotent; returns 0 on success, -1 on failure.
    static int
    init() {
      static void *handle { nullptr };
      static bool funcs_loaded = false;

      if (funcs_loaded) return 0;

      if (!handle) {
        handle = dyn::handle({ "libXfixes.so.3", "libXfixes.so" });
        if (!handle) {
          return -1;
        }
      }

      std::vector<std::tuple<dyn::apiproc *, const char *>> funcs {
        { (dyn::apiproc *) &GetCursorImage, "XFixesGetCursorImage" },
      };

      if (dyn::load(handle, funcs)) {
        return -1;
      }

      funcs_loaded = true;
      return 0;
    }
  }  // namespace fix

  // Loads the core libX11 symbols used by this backend.
  // Idempotent; returns 0 on success, -1 on failure.
  static int
  init() {
    static void *handle { nullptr };
    static bool funcs_loaded = false;

    if (funcs_loaded) return 0;

    if (!handle) {
      handle = dyn::handle({ "libX11.so.6", "libX11.so" });
      if (!handle) {
        return -1;
      }
    }

    std::vector<std::tuple<dyn::apiproc *, const char *>> funcs {
      { (dyn::apiproc *) &GetImage, "XGetImage" },
      { (dyn::apiproc *) &OpenDisplay, "XOpenDisplay" },
      { (dyn::apiproc *) &GetWindowAttributes, "XGetWindowAttributes" },
      { (dyn::apiproc *) &Free, "XFree" },
      { (dyn::apiproc *) &CloseDisplay, "XCloseDisplay" },
      { (dyn::apiproc *) &InitThreads, "XInitThreads" },
    };

    if (dyn::load(handle, funcs)) {
      return -1;
    }

    funcs_loaded = true;
    return 0;
  }
}  // namespace x11
// XCB entry points, resolved at runtime like the x11 namespace above
// (the _FN macro is still in effect here and is #undef'd at the end).
namespace xcb {
  static xcb_extension_t *shm_id;

  _FN(shm_get_image_reply, xcb_shm_get_image_reply_t *,
    (
      xcb_connection_t * c,
      xcb_shm_get_image_cookie_t cookie,
      xcb_generic_error_t **e));

  _FN(shm_get_image_unchecked, xcb_shm_get_image_cookie_t,
    (
      xcb_connection_t * c,
      xcb_drawable_t drawable,
      int16_t x, int16_t y,
      uint16_t width, uint16_t height,
      uint32_t plane_mask,
      uint8_t format,
      xcb_shm_seg_t shmseg,
      uint32_t offset));

  _FN(shm_attach, xcb_void_cookie_t,
    (xcb_connection_t * c,
      xcb_shm_seg_t shmseg,
      uint32_t shmid,
      uint8_t read_only));

  _FN(get_extension_data, xcb_query_extension_reply_t *,
    (xcb_connection_t * c, xcb_extension_t *ext));

  _FN(get_setup, xcb_setup_t *, (xcb_connection_t * c));
  _FN(disconnect, void, (xcb_connection_t * c));
  _FN(connection_has_error, int, (xcb_connection_t * c));
  _FN(connect, xcb_connection_t *, (const char *displayname, int *screenp));
  _FN(setup_roots_iterator, xcb_screen_iterator_t, (const xcb_setup_t *R));
  _FN(generate_id, std::uint32_t, (xcb_connection_t * c));

  // Loads libxcb-shm and resolves the MIT-SHM symbols.
  // Idempotent; returns 0 on success, -1 on failure.
  int
  init_shm() {
    static void *handle { nullptr };
    static bool funcs_loaded = false;

    if (funcs_loaded) return 0;

    if (!handle) {
      handle = dyn::handle({ "libxcb-shm.so.0", "libxcb-shm.so" });
      if (!handle) {
        return -1;
      }
    }

    std::vector<std::tuple<dyn::apiproc *, const char *>> funcs {
      { (dyn::apiproc *) &shm_id, "xcb_shm_id" },
      { (dyn::apiproc *) &shm_get_image_reply, "xcb_shm_get_image_reply" },
      { (dyn::apiproc *) &shm_get_image_unchecked, "xcb_shm_get_image_unchecked" },
      { (dyn::apiproc *) &shm_attach, "xcb_shm_attach" },
    };

    if (dyn::load(handle, funcs)) {
      return -1;
    }

    funcs_loaded = true;
    return 0;
  }

  // Loads the core libxcb symbols used by this backend.
  // Idempotent; returns 0 on success, -1 on failure.
  int
  init() {
    static void *handle { nullptr };
    static bool funcs_loaded = false;

    if (funcs_loaded) return 0;

    if (!handle) {
      handle = dyn::handle({ "libxcb.so.1", "libxcb.so" });
      if (!handle) {
        return -1;
      }
    }

    std::vector<std::tuple<dyn::apiproc *, const char *>> funcs {
      { (dyn::apiproc *) &get_extension_data, "xcb_get_extension_data" },
      { (dyn::apiproc *) &get_setup, "xcb_get_setup" },
      { (dyn::apiproc *) &disconnect, "xcb_disconnect" },
      { (dyn::apiproc *) &connection_has_error, "xcb_connection_has_error" },
      { (dyn::apiproc *) &connect, "xcb_connect" },
      { (dyn::apiproc *) &setup_roots_iterator, "xcb_setup_roots_iterator" },
      { (dyn::apiproc *) &generate_id, "xcb_generate_id" },
    };

    if (dyn::load(handle, funcs)) {
      return -1;
    }

    funcs_loaded = true;
    return 0;
  }

#undef _FN
}  // namespace xcb
// Deleters for the smart-pointer aliases below (defined elsewhere in this file).
void
freeImage(XImage *);
void
freeX(XFixesCursorImage *);

// Owning smart-pointer aliases for the X11/XCB resources used by this backend.
using xcb_connect_t = util::dyn_safe_ptr<xcb_connection_t, &xcb::disconnect>;
using xcb_img_t = util::c_ptr<xcb_shm_get_image_reply_t>;

using ximg_t = util::safe_ptr<XImage, freeImage>;
using xcursor_t = util::safe_ptr<XFixesCursorImage, freeX>;

using crtc_info_t = util::dyn_safe_ptr<_XRRCrtcInfo, &x11::rr::FreeCrtcInfo>;
using output_info_t = util::dyn_safe_ptr<_XRROutputInfo, &x11::rr::FreeOutputInfo>;
using screen_res_t = util::dyn_safe_ptr<_XRRScreenResources, &x11::rr::FreeScreenResources>;
// RAII owner of a System V shared memory identifier; the segment is marked
// for removal (IPC_RMID) when the owner goes out of scope. -1 is the
// "no segment" sentinel. Move-only by construction (the user-declared move
// constructor suppresses the implicit copy operations).
class shm_id_t {
public:
  shm_id_t():
    shm_id_t { -1 } {}
  shm_id_t(int id):
    id { id } {}
  shm_id_t(shm_id_t &&other) noexcept:
    id { other.id } {
    // The moved-from object must not remove the segment.
    other.id = -1;
  }
  ~shm_id_t() {
    if (id == -1) {
      return;
    }
    shmctl(id, IPC_RMID, nullptr);
    id = -1;
  }
  int id;
};
// RAII owner of a shared memory attachment; detaches (shmdt) on destruction.
// (void *) -1 is the "not attached" sentinel, mirroring shmat()'s failure
// return value. Move-only by construction.
class shm_data_t {
public:
  shm_data_t():
    shm_data_t { (void *) -1 } {}
  shm_data_t(void *data):
    data { data } {}
  shm_data_t(shm_data_t &&other) noexcept:
    data { other.data } {
    // The moved-from object must not detach the mapping.
    other.data = (void *) -1;
  }
  ~shm_data_t() {
    if (data != (void *) -1) {
      shmdt(data);
    }
  }
  void *data;
};
// Frame captured via XGetImage; owns the XImage through the ximg_t smart pointer.
struct x11_img_t: public img_t {
  ximg_t img;
};
// Frame whose pixel data lives in a heap buffer; the destructor releases it.
// (Assumes `data` was allocated with new[] by the capture code.)
struct shm_img_t: public img_t {
  ~shm_img_t() override {
    delete[] data;
    data = nullptr;
  }
};
/**
 * @brief Alpha-blend the current X11 cursor image into a captured frame.
 * @param display The X display to query the cursor from.
 * @param img Destination frame (32-bit pixels) to draw the cursor onto.
 * @param offsetX X coordinate of the captured region's origin in screen space.
 * @param offsetY Y coordinate of the captured region's origin in screen space.
 */
static void
blend_cursor(Display *display, img_t &img, int offsetX, int offsetY) {
  xcursor_t overlay { x11::fix::GetCursorImage(display) };

  if (!overlay) {
    BOOST_LOG(error) << "Couldn't get cursor from XFixesGetCursorImage"sv;
    return;
  }

  // Shift from the cursor hotspot to its top-left corner, then into
  // frame-local coordinates, clamping at the frame's top-left edge.
  overlay->x -= overlay->xhot;
  overlay->y -= overlay->yhot;
  overlay->x -= offsetX;
  overlay->y -= offsetY;

  overlay->x = std::max((short) 0, overlay->x);
  overlay->y = std::max((short) 0, overlay->y);

  auto pixels = (int *) img.data;

  auto screen_height = img.height;
  auto screen_width = img.width;

  // Clip the cursor rectangle to the portion that falls inside the frame.
  auto delta_height = std::min<uint16_t>(overlay->height, std::max(0, screen_height - overlay->y));
  auto delta_width = std::min<uint16_t>(overlay->width, std::max(0, screen_width - overlay->x));
  for (auto y = 0; y < delta_height; ++y) {
    auto overlay_begin = &overlay->pixels[y * overlay->width];
    auto overlay_end = &overlay->pixels[y * overlay->width + delta_width];

    auto pixels_begin = &pixels[(y + overlay->y) * (img.row_pitch / img.pixel_pitch) + overlay->x];

    // Each cursor pixel is ARGB packed in the low 32 bits of a long.
    std::for_each(overlay_begin, overlay_end, [&](long pixel) {
      int *pixel_p = (int *) &pixel;

      auto colors_in = (uint8_t *) pixels_begin;

      auto alpha = (*(uint *) pixel_p) >> 24u;
      if (alpha == 255) {
        // Fully opaque: overwrite the frame pixel.
        *pixels_begin = *pixel_p;
      }
      else {
        // Blend the (premultiplied) cursor color over the frame pixel,
        // rounding the division by 255.
        auto colors_out = (uint8_t *) pixel_p;
        colors_in[0] = colors_out[0] + (colors_in[0] * (255 - alpha) + 255 / 2) / 255;
        colors_in[1] = colors_out[1] + (colors_in[1] * (255 - alpha) + 255 / 2) / 255;
        colors_in[2] = colors_out[2] + (colors_in[2] * (255 - alpha) + 255 / 2) / 255;
      }
      ++pixels_begin;
    });
  }
}
/**
 * Software X11 capture backend based on XGetImage (one full copy per frame).
 * Serves as the fallback when the SHM path (shm_attr_t) is unavailable.
 */
struct x11_attr_t: public display_t {
  // Frame interval derived from the requested framerate (1s / fps).
  std::chrono::nanoseconds delay;

  x11::xdisplay_t xdisplay;  // RAII connection to the X server
  Window xwindow;  // root window of the display being captured
  XWindowAttributes xattr;  // cached root-window attributes, see refresh()
  mem_type_e mem_type;  // encoder memory type (system/vaapi/cuda)

  /**
   * Last X (NOT the streamed monitor!) size.
   * This way we can trigger reinitialization if the dimensions changed while streaming
   */
  // int env_width, env_height;

  x11_attr_t(mem_type_e mem_type):
    xdisplay { x11::OpenDisplay(nullptr) }, xwindow {}, xattr {}, mem_type { mem_type } {
    x11::InitThreads();
  }

  /**
   * Pick the capture geometry for the requested monitor (or the whole
   * virtual desktop) and record the overall X screen size for change detection.
   * @param display_name Monitor index as a string, or empty for the full desktop.
   * @return 0 on success, -1 on failure.
   */
  int
  init(const std::string &display_name, const ::video::config_t &config) {
    if (!xdisplay) {
      BOOST_LOG(error) << "Could not open X11 display"sv;
      return -1;
    }

    delay = std::chrono::nanoseconds { 1s } / config.framerate;

    xwindow = DefaultRootWindow(xdisplay.get());

    refresh();

    int streamedMonitor = -1;
    if (!display_name.empty()) {
      streamedMonitor = (int) util::from_view(display_name);
    }

    if (streamedMonitor != -1) {
      BOOST_LOG(info) << "Configuring selected display ("sv << streamedMonitor << ") to stream"sv;
      screen_res_t screenr { x11::rr::GetScreenResources(xdisplay.get(), xwindow) };
      int output = screenr->noutput;

      output_info_t result;
      // Count only outputs RandR can describe; the Nth such output is monitor N.
      int monitor = 0;
      for (int x = 0; x < output; ++x) {
        output_info_t out_info { x11::rr::GetOutputInfo(xdisplay.get(), screenr.get(), screenr->outputs[x]) };
        if (out_info) {
          if (monitor++ == streamedMonitor) {
            result = std::move(out_info);
            break;
          }
        }
      }

      if (!result) {
        BOOST_LOG(error) << "Could not stream display number ["sv << streamedMonitor << "], there are only ["sv << monitor << "] displays."sv;
        return -1;
      }

      if (result->crtc) {
        // Active output: capture exactly its CRTC rectangle.
        crtc_info_t crt_info { x11::rr::GetCrtcInfo(xdisplay.get(), screenr.get(), result->crtc) };
        BOOST_LOG(info)
          << "Streaming display: "sv << result->name << " with res "sv << crt_info->width << 'x' << crt_info->height << " offset by "sv << crt_info->x << 'x' << crt_info->y;

        width = crt_info->width;
        height = crt_info->height;
        offset_x = crt_info->x;
        offset_y = crt_info->y;
      }
      else {
        // Output exists but has no CRTC (disconnected/disabled) — grab everything.
        BOOST_LOG(warning) << "Couldn't get requested display info, defaulting to recording entire virtual desktop"sv;
        width = xattr.width;
        height = xattr.height;
      }
    }
    else {
      width = xattr.width;
      height = xattr.height;
    }

    // Remember the full X screen size so snapshot() can detect a mode change.
    env_width = xattr.width;
    env_height = xattr.height;

    return 0;
  }

  /**
   * Called when the display attributes should change.
   */
  void
  refresh() {
    x11::GetWindowAttributes(xdisplay.get(), xwindow, &xattr);  // Update xattr's
  }

  /**
   * Paced capture loop: sleeps to the next frame deadline, snapshots, and
   * pushes the frame to the consumer until it signals stop or an error occurs.
   */
  capture_e
  capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override {
    auto next_frame = std::chrono::steady_clock::now();

    sleep_overshoot_logger.reset();

    while (true) {
      auto now = std::chrono::steady_clock::now();

      if (next_frame > now) {
        std::this_thread::sleep_for(next_frame - now);
        sleep_overshoot_logger.first_point(next_frame);
        sleep_overshoot_logger.second_point_now_and_log();
      }
      next_frame += delay;
      if (next_frame < now) {  // some major slowdown happened; we couldn't keep up
        next_frame = now + delay;
      }

      std::shared_ptr<platf::img_t> img_out;
      auto status = snapshot(pull_free_image_cb, img_out, 1000ms, *cursor);
      switch (status) {
        case platf::capture_e::reinit:
        case platf::capture_e::error:
        case platf::capture_e::interrupted:
          return status;
        case platf::capture_e::timeout:
          if (!push_captured_image_cb(std::move(img_out), false)) {
            return platf::capture_e::ok;
          }
          break;
        case platf::capture_e::ok:
          if (!push_captured_image_cb(std::move(img_out), true)) {
            return platf::capture_e::ok;
          }
          break;
        default:
          BOOST_LOG(error) << "Unrecognized capture status ["sv << (int) status << ']';
          return status;
      }
    }

    return capture_e::ok;
  }

  /**
   * Grab one frame with XGetImage into a fresh x11_img_t.
   * @param timeout Unused by this backend (XGetImage is synchronous).
   * @return reinit if the X screen dimensions changed, otherwise ok/interrupted.
   */
  capture_e
  snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor) {
    refresh();

    // The whole X server changed, so we must reinit everything
    if (xattr.width != env_width || xattr.height != env_height) {
      BOOST_LOG(warning) << "X dimensions changed in non-SHM mode, request reinit"sv;
      return capture_e::reinit;
    }

    if (!pull_free_image_cb(img_out)) {
      return platf::capture_e::interrupted;
    }
    auto img = (x11_img_t *) img_out.get();

    XImage *x_img { x11::GetImage(xdisplay.get(), xwindow, offset_x, offset_y, width, height, AllPlanes, ZPixmap) };

    img->frame_timestamp = std::chrono::steady_clock::now();
    img->width = x_img->width;
    img->height = x_img->height;
    // img->data aliases the XImage buffer; the ximg_t member keeps it alive.
    img->data = (uint8_t *) x_img->data;
    img->row_pitch = x_img->bytes_per_line;
    img->pixel_pitch = x_img->bits_per_pixel / 8;
    img->img.reset(x_img);

    if (cursor) {
      blend_cursor(xdisplay.get(), *img, offset_x, offset_y);
    }

    return capture_e::ok;
  }

  std::shared_ptr<img_t>
  alloc_img() override {
    return std::make_shared<x11_img_t>();
  }

  // Pick the encoder device matching the configured memory type; falls back to
  // a plain software device when no hardware path was compiled in or selected.
  std::unique_ptr<avcodec_encode_device_t>
  make_avcodec_encode_device(pix_fmt_e pix_fmt) override {
#ifdef SUNSHINE_BUILD_VAAPI
    if (mem_type == mem_type_e::vaapi) {
      return va::make_avcodec_encode_device(width, height, false);
    }
#endif
#ifdef SUNSHINE_BUILD_CUDA
    if (mem_type == mem_type_e::cuda) {
      return cuda::make_avcodec_encode_device(width, height, false);
    }
#endif
    return std::make_unique<avcodec_encode_device_t>();
  }

  int
  dummy_img(img_t *img) override {
    // TODO: stop cheating and give black image
    if (!img) {
      return -1;
    };
    // Reuse snapshot() by handing it a callback that returns the caller's img.
    auto pull_dummy_img_callback = [&img](std::shared_ptr<platf::img_t> &img_out) -> bool {
      img_out = img->shared_from_this();
      return true;
    };

    std::shared_ptr<platf::img_t> img_out;
    snapshot(pull_dummy_img_callback, img_out, 0s, true);
    return 0;
  }
};
/**
 * X11 capture backend using the MIT-SHM extension (via xcb): the server writes
 * frames directly into a shared-memory segment, avoiding a per-frame copy
 * through the X protocol. Falls back to x11_attr_t when SHM is unavailable.
 */
struct shm_attr_t: public x11_attr_t {
  x11::xdisplay_t shm_xdisplay;  // Prevent race condition with x11_attr_t::xdisplay
  xcb_connect_t xcb;  // xcb connection used for the SHM image requests
  xcb_screen_t *display;  // xcb screen whose root window is captured
  std::uint32_t seg;  // xcb shm segment id registered with the server

  shm_id_t shm_id;  // owning wrapper for the shmget() id

  shm_data_t data;  // owning wrapper for the shmat() mapping

  task_pool_util::TaskPool::task_id_t refresh_task_id;

  // Re-reads the root window attributes every 2s so a resolution change is
  // noticed by snapshot() (which triggers a reinit).
  void
  delayed_refresh() {
    refresh();

    refresh_task_id = task_pool.pushDelayed(&shm_attr_t::delayed_refresh, 2s, this).task_id;
  }

  shm_attr_t(mem_type_e mem_type):
    x11_attr_t(mem_type), shm_xdisplay { x11::OpenDisplay(nullptr) } {
    refresh_task_id = task_pool.pushDelayed(&shm_attr_t::delayed_refresh, 2s, this).task_id;
  }

  ~shm_attr_t() override {
    // Spin until the delayed refresh task is cancelled; cancel() can fail while
    // the task is mid-execution and re-scheduling itself.
    while (!task_pool.cancel(refresh_task_id));
  }

  /**
   * Paced capture loop; identical pacing logic to x11_attr_t::capture but
   * dispatches to the SHM snapshot() below.
   */
  capture_e
  capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override {
    auto next_frame = std::chrono::steady_clock::now();

    sleep_overshoot_logger.reset();

    while (true) {
      auto now = std::chrono::steady_clock::now();

      if (next_frame > now) {
        std::this_thread::sleep_for(next_frame - now);
        sleep_overshoot_logger.first_point(next_frame);
        sleep_overshoot_logger.second_point_now_and_log();
      }
      next_frame += delay;
      if (next_frame < now) {  // some major slowdown happened; we couldn't keep up
        next_frame = now + delay;
      }

      std::shared_ptr<platf::img_t> img_out;
      auto status = snapshot(pull_free_image_cb, img_out, 1000ms, *cursor);
      switch (status) {
        case platf::capture_e::reinit:
        case platf::capture_e::error:
        case platf::capture_e::interrupted:
          return status;
        case platf::capture_e::timeout:
          if (!push_captured_image_cb(std::move(img_out), false)) {
            return platf::capture_e::ok;
          }
          break;
        case platf::capture_e::ok:
          if (!push_captured_image_cb(std::move(img_out), true)) {
            return platf::capture_e::ok;
          }
          break;
        default:
          BOOST_LOG(error) << "Unrecognized capture status ["sv << (int) status << ']';
          return status;
      }
    }

    return capture_e::ok;
  }

  /**
   * Ask the server to write the frame into the shared segment, then copy it
   * into the consumer's buffer.
   * @param timeout Unused; the xcb reply wait is unbounded.
   */
  capture_e
  snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor) {
    // The whole X server changed, so we must reinit everything
    if (xattr.width != env_width || xattr.height != env_height) {
      BOOST_LOG(warning) << "X dimensions changed in SHM mode, request reinit"sv;
      return capture_e::reinit;
    }
    else {
      // ~0 = all planes; the server writes into segment `seg` at offset 0.
      auto img_cookie = xcb::shm_get_image_unchecked(xcb.get(), display->root, offset_x, offset_y, width, height, ~0, XCB_IMAGE_FORMAT_Z_PIXMAP, seg, 0);

      auto frame_timestamp = std::chrono::steady_clock::now();
      xcb_img_t img_reply { xcb::shm_get_image_reply(xcb.get(), img_cookie, nullptr) };
      if (!img_reply) {
        BOOST_LOG(error) << "Could not get image reply"sv;
        return capture_e::reinit;
      }

      if (!pull_free_image_cb(img_out)) {
        return platf::capture_e::interrupted;
      }

      std::copy_n((std::uint8_t *) data.data, frame_size(), img_out->data);
      img_out->frame_timestamp = frame_timestamp;

      if (cursor) {
        blend_cursor(shm_xdisplay.get(), *img_out, offset_x, offset_y);
      }

      return capture_e::ok;
    }
  }

  // Frames are plain heap buffers sized for 32-bit pixels (see frame_size()).
  std::shared_ptr<img_t>
  alloc_img() override {
    auto img = std::make_shared<shm_img_t>();
    img->width = width;
    img->height = height;
    img->pixel_pitch = 4;
    img->row_pitch = img->pixel_pitch * width;
    img->data = new std::uint8_t[height * img->row_pitch];

    return img;
  }

  // No-op: the buffer from alloc_img() is used as-is (contents unspecified).
  int
  dummy_img(platf::img_t *img) override {
    return 0;
  }

  /**
   * Set up geometry via the base class, then create and attach the shared
   * segment on both the client and the X server.
   * @return 0 on success, 1 if base init failed (fatal), -1 if SHM setup failed
   *         (caller may fall back to the non-SHM backend).
   */
  int
  init(const std::string &display_name, const ::video::config_t &config) {
    if (x11_attr_t::init(display_name, config)) {
      return 1;
    }

    shm_xdisplay.reset(x11::OpenDisplay(nullptr));
    xcb.reset(xcb::connect(nullptr, nullptr));
    if (xcb::connection_has_error(xcb.get())) {
      return -1;
    }

    if (!xcb::get_extension_data(xcb.get(), xcb::shm_id)->present) {
      BOOST_LOG(error) << "Missing SHM extension"sv;

      return -1;
    }

    auto iter = xcb::setup_roots_iterator(xcb::get_setup(xcb.get()));
    display = iter.data;
    seg = xcb::generate_id(xcb.get());

    // NOTE(review): mode 0777 makes the segment world-read/writable — confirm
    // whether a tighter mode (0600) would suffice for the X server to attach.
    shm_id.id = shmget(IPC_PRIVATE, frame_size(), IPC_CREAT | 0777);
    if (shm_id.id == -1) {
      BOOST_LOG(error) << "shmget failed"sv;
      return -1;
    }

    xcb::shm_attach(xcb.get(), seg, shm_id.id, false);
    data.data = shmat(shm_id.id, nullptr, 0);

    if ((uintptr_t) data.data == -1) {
      BOOST_LOG(error) << "shmat failed"sv;
      return -1;
    }

    return 0;
  }

  // Size in bytes of one full frame (32-bit pixels).
  std::uint32_t
  frame_size() {
    return width * height * 4;
  }
};
/**
 * Create an X11 capture backend for the given encoder memory type.
 * Tries the MIT-SHM path first and falls back to plain XGetImage capture.
 * @return A ready display, or nullptr if neither backend could initialize.
 */
std::shared_ptr<display_t>
x11_display(platf::mem_type_e hwdevice_type, const std::string &display_name, const ::video::config_t &config) {
  // Only system-memory, VAAPI and CUDA frames are supported by this backend.
  const bool supported_mem =
    hwdevice_type == platf::mem_type_e::system ||
    hwdevice_type == platf::mem_type_e::vaapi ||
    hwdevice_type == platf::mem_type_e::cuda;
  if (!supported_mem) {
    BOOST_LOG(error) << "Could not initialize x11 display with the given hw device type"sv;
    return nullptr;
  }

  if (xcb::init_shm() || xcb::init() || x11::init() || x11::rr::init() || x11::fix::init()) {
    BOOST_LOG(error) << "Couldn't init x11 libraries"sv;
    return nullptr;
  }

  // Prefer shared-memory X11 capture to avoid copying every frame.
  auto shm_disp = std::make_shared<shm_attr_t>(hwdevice_type);
  const auto status = shm_disp->init(display_name, config);
  if (status == 0) {
    return shm_disp;
  }
  if (status > 0) {
    // The base x11_attr_t::init() itself failed — a retry cannot succeed.
    return nullptr;
  }

  // Negative status: SHM unavailable, fall back to the non-SHM backend.
  auto x11_disp = std::make_shared<x11_attr_t>(hwdevice_type);
  if (x11_disp->init(display_name, config)) {
    return nullptr;
  }
  return x11_disp;
}
/**
 * Enumerate the monitors RandR reports for the default X display.
 * @return One entry per detected output, each the stringified monitor index
 *         ("0", "1", ...); empty on any initialization failure.
 *
 * Fix: the detection log line previously streamed `out_info->name` twice
 * ("Detected display: NAME (id: N)NAME connected: ..."); the duplicate
 * insertion has been removed.
 */
std::vector<std::string>
x11_display_names() {
  if (load_x11() || load_xcb()) {
    BOOST_LOG(error) << "Couldn't init x11 libraries"sv;

    return {};
  }

  BOOST_LOG(info) << "Detecting displays"sv;

  x11::xdisplay_t xdisplay { x11::OpenDisplay(nullptr) };
  if (!xdisplay) {
    return {};
  }

  auto xwindow = DefaultRootWindow(xdisplay.get());
  screen_res_t screenr { x11::rr::GetScreenResources(xdisplay.get(), xwindow) };
  int output = screenr->noutput;

  // Count only outputs RandR can describe; their order defines the indices.
  int monitor = 0;
  for (int x = 0; x < output; ++x) {
    output_info_t out_info { x11::rr::GetOutputInfo(xdisplay.get(), screenr.get(), screenr->outputs[x]) };
    if (out_info) {
      BOOST_LOG(info) << "Detected display: "sv << out_info->name << " (id: "sv << monitor << ") connected: "sv << (out_info->connection == RR_Connected);
      ++monitor;
    }
  }

  std::vector<std::string> names;
  names.reserve(monitor);

  for (auto x = 0; x < monitor; ++x) {
    names.emplace_back(std::to_string(x));
  }

  return names;
}
// Deleter for ximg_t: releases an XImage (and its pixel buffer).
void
freeImage(XImage *p) {
  XDestroyImage(p);
}
// Deleter for xcursor_t: frees an XFixes cursor image via XFree.
void
freeX(XFixesCursorImage *p) {
  x11::Free(p);
}
// Load the xcb + xcb-shm libraries once; the static initializer caches the
// combined result so repeated calls are cheap.
// @return 0 on success, non-zero if either library failed to load.
int
load_xcb() {
  // This will be called once only
  static int xcb_status = xcb::init_shm() || xcb::init();

  return xcb_status;
}
// Load libX11 + Xrandr + Xfixes once; also fails fast when no window system
// is present at all. Result is cached by the static initializer.
// @return 0 on success, non-zero on failure.
int
load_x11() {
  // This will be called once only
  static int x11_status =
    window_system == window_system_e::NONE ||
    x11::init() || x11::rr::init() || x11::fix::init();

  return x11_status;
}
namespace x11 {
  /**
   * Create a cursor-capture context backed by its own X display connection.
   * @return std::nullopt if the X11 libraries could not be loaded.
   */
  std::optional<cursor_t>
  cursor_t::make() {
    if (load_x11()) {
      return std::nullopt;
    }

    cursor_t cursor;
    // The context is simply an X display handle, type-erased into cursor_ctx_t.
    cursor.ctx.reset((cursor_ctx_t::pointer) x11::OpenDisplay(nullptr));

    return cursor;
  }

  /**
   * Snapshot the current cursor into an EGL cursor image. The pixel buffer is
   * only re-converted when the cursor serial changes; position/size metadata
   * is refreshed on every call.
   */
  void
  cursor_t::capture(egl::cursor_t &img) {
    auto display = (xdisplay_t::pointer) ctx.get();

    xcursor_t xcursor = fix::GetCursorImage(display);

    if (img.serial != xcursor->cursor_serial) {
      // buf_size is in bytes (4 bytes per pixel).
      auto buf_size = xcursor->width * xcursor->height * sizeof(int);

      if (img.buffer.size() < buf_size) {
        img.buffer.resize(buf_size);
      }

      // XFixes stores each pixel in an unsigned long; narrow to 32-bit ARGB.
      std::transform(xcursor->pixels, xcursor->pixels + buf_size / 4, (int *) img.buffer.data(), [](long pixel) -> int {
        return pixel;
      });
    }

    img.data = img.buffer.data();
    img.width = img.src_w = xcursor->width;
    img.height = img.src_h = xcursor->height;
    // Top-left corner of the cursor image (hotspot subtracted out).
    img.x = xcursor->x - xcursor->xhot;
    img.y = xcursor->y - xcursor->yhot;
    img.pixel_pitch = 4;
    img.row_pitch = img.pixel_pitch * img.width;
    img.serial = xcursor->cursor_serial;
  }

  // Blend the live cursor into a captured software frame.
  void
  cursor_t::blend(img_t &img, int offsetX, int offsetY) {
    blend_cursor((xdisplay_t::pointer) ctx.get(), img, offsetX, offsetY);
  }

  // Open a fresh connection to the default X display.
  xdisplay_t
  make_display() {
    return OpenDisplay(nullptr);
  }

  // Deleter for xdisplay_t.
  void
  freeDisplay(_XDisplay *xdisplay) {
    CloseDisplay(xdisplay);
  }

  // Deleter for cursor_ctx_t (the context is an X display handle).
  void
  freeCursorCtx(cursor_ctx_t::pointer ctx) {
    CloseDisplay((xdisplay_t::pointer) ctx);
  }
}  // namespace x11
} // namespace platf
| 26,664
|
C++
|
.cpp
| 746
| 28.548257
| 182
| 0.582469
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,003
|
legacy_input.cpp
|
LizardByte_Sunshine/src/platform/linux/input/legacy_input.cpp
|
/**
* @file src/platform/linux/input/legacy_input.cpp
* @brief Implementation of input handling, prior to migration to inputtino
* @todo Remove this file after the next stable release
*/
#include <fcntl.h>
#include <linux/uinput.h>
#include <poll.h>
extern "C" {
#include <libevdev/libevdev-uinput.h>
#include <libevdev/libevdev.h>
}
#ifdef SUNSHINE_BUILD_X11
#include <X11/Xutil.h>
#include <X11/extensions/XTest.h>
#include <X11/keysym.h>
#include <X11/keysymdef.h>
#endif
#include <boost/locale.hpp>
#include <cmath>
#include <cstring>
#include <filesystem>
#include <thread>
#include "src/config.h"
#include "src/input.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include "src/platform/common.h"
#include "src/platform/linux/misc.h"
// Support older versions
#ifndef REL_HWHEEL_HI_RES
#define REL_HWHEEL_HI_RES 0x0c
#endif
#ifndef REL_WHEEL_HI_RES
#define REL_WHEEL_HI_RES 0x0b
#endif
using namespace std::literals;
namespace platf {
// Whether uinput device creation is available — presumably set elsewhere during
// platform input initialization; not assigned in this portion of the file.
static bool has_uinput = false;
#ifdef SUNSHINE_BUILD_X11
namespace x11 {
// Declares a function-pointer typedef (x##_fn) plus a static pointer named
// after the X11 symbol; the pointers are resolved at runtime by init() below,
// so Sunshine has no hard link-time dependency on libX11/libXtst.
#define _FN(x, ret, args) \
  typedef ret(*x##_fn) args; \
  static x##_fn x

  _FN(OpenDisplay, Display *, (_Xconst char *display_name));
  _FN(CloseDisplay, int, (Display * display));
  _FN(InitThreads, Status, (void) );
  _FN(Flush, int, (Display *) );

  namespace tst {
    _FN(FakeMotionEvent, int, (Display * dpy, int screen_numer, int x, int y, unsigned long delay));
    _FN(FakeRelativeMotionEvent, int, (Display * dpy, int deltaX, int deltaY, unsigned long delay));
    _FN(FakeButtonEvent, int, (Display * dpy, unsigned int button, Bool is_press, unsigned long delay));
    _FN(FakeKeyEvent, int, (Display * dpy, unsigned int keycode, Bool is_press, unsigned long delay));

    /**
     * Lazily dlopen libXtst and resolve the XTest fake-input entry points.
     * @return 0 on success, -1 if the library or any symbol is missing.
     */
    static int
    init() {
      static void *handle { nullptr };
      static bool funcs_loaded = false;

      if (funcs_loaded) return 0;

      if (!handle) {
        handle = dyn::handle({ "libXtst.so.6", "libXtst.so" });
        if (!handle) {
          return -1;
        }
      }

      std::vector<std::tuple<dyn::apiproc *, const char *>> funcs {
        { (dyn::apiproc *) &FakeMotionEvent, "XTestFakeMotionEvent" },
        { (dyn::apiproc *) &FakeRelativeMotionEvent, "XTestFakeRelativeMotionEvent" },
        { (dyn::apiproc *) &FakeButtonEvent, "XTestFakeButtonEvent" },
        { (dyn::apiproc *) &FakeKeyEvent, "XTestFakeKeyEvent" },
      };

      if (dyn::load(handle, funcs)) {
        return -1;
      }

      funcs_loaded = true;
      return 0;
    }
  }  // namespace tst

  /**
   * Lazily dlopen libX11 and resolve the core display entry points.
   * @return 0 on success, -1 if the library or any symbol is missing.
   */
  static int
  init() {
    static void *handle { nullptr };
    static bool funcs_loaded = false;

    if (funcs_loaded) return 0;

    if (!handle) {
      handle = dyn::handle({ "libX11.so.6", "libX11.so" });
      if (!handle) {
        return -1;
      }
    }

    std::vector<std::tuple<dyn::apiproc *, const char *>> funcs {
      { (dyn::apiproc *) &OpenDisplay, "XOpenDisplay" },
      { (dyn::apiproc *) &CloseDisplay, "XCloseDisplay" },
      { (dyn::apiproc *) &InitThreads, "XInitThreads" },
      { (dyn::apiproc *) &Flush, "XFlush" },
    };

    if (dyn::load(handle, funcs)) {
      return -1;
    }

    funcs_loaded = true;
    return 0;
  }
}  // namespace x11
#endif
// Mailbox id used to publish evdev feedback (rumble) queues.
constexpr auto mail_evdev = "platf::evdev"sv;

// RAII wrappers around libevdev device and uinput handles.
using evdev_t = util::safe_ptr<libevdev, libevdev_free>;
using uinput_t = util::safe_ptr<libevdev_uinput, libevdev_uinput_destroy>;

// Sentinel pollfd value: fd == -1 means "no descriptor".
constexpr pollfd read_pollfd { -1, 0, 0 };
// Move-only pollfd: on destruction, releases the EVIOCGRAB and closes the fd.
KITTY_USING_MOVE_T(pollfd_t, pollfd, read_pollfd, {
  if (el.fd >= 0) {
    ioctl(el.fd, EVIOCGRAB, (void *) 0);
    close(el.fd);
  }
});

// Payload sent to the rumble thread:
// (gamepad number, uinput device, feedback queue, pollfd for FF events).
using mail_evdev_t = std::tuple<int, uinput_t::pointer, feedback_queue_t, pollfd_t>;
// One translation-table entry, indexed by Windows virtual-key code: the
// corresponding Linux input keycode, HID scancode and (with X11) keysym.
struct keycode_t {
  std::uint32_t keycode;
  std::uint32_t scancode;

#ifdef SUNSHINE_BUILD_X11
  KeySym keysym;
#endif
};

// Placeholder for entries with no known scancode/keysym.
constexpr auto UNKNOWN = 0;
/**
 * @brief Initializes the keycode constants for translating
 * moonlight keycodes to linux/X11 keycodes.
 *
 * Fix: VKEY_F22 (0x85) previously mapped to KEY_F12/XK_F12 — an apparent
 * copy-paste typo — and now maps to KEY_F22/XK_F22.
 * NOTE(review): several F-row scancodes look shifted relative to the USB HID
 * usage table (e.g. F10 and F11 both list 0x70044); left unchanged here since
 * clients may depend on the current values — verify against the HID spec.
 */
static constexpr std::array<keycode_t, 0xE3>
init_keycodes() {
  std::array<keycode_t, 0xE3> keycodes {};

#ifdef SUNSHINE_BUILD_X11
  #define __CONVERT_UNSAFE(wincode, linuxcode, scancode, keysym) \
    keycodes[wincode] = keycode_t { linuxcode, scancode, keysym };
#else
  #define __CONVERT_UNSAFE(wincode, linuxcode, scancode, keysym) \
    keycodes[wincode] = keycode_t { linuxcode, scancode };
#endif

#define __CONVERT(wincode, linuxcode, scancode, keysym) \
  static_assert(wincode < keycodes.size(), "Keycode doesn't fit into keycode array"); \
  static_assert(wincode >= 0, "Are you mad?, keycode needs to be greater than zero"); \
  __CONVERT_UNSAFE(wincode, linuxcode, scancode, keysym)

  __CONVERT(0x08 /* VKEY_BACK */, KEY_BACKSPACE, 0x7002A, XK_BackSpace);
  __CONVERT(0x09 /* VKEY_TAB */, KEY_TAB, 0x7002B, XK_Tab);
  __CONVERT(0x0C /* VKEY_CLEAR */, KEY_CLEAR, UNKNOWN, XK_Clear);
  __CONVERT(0x0D /* VKEY_RETURN */, KEY_ENTER, 0x70028, XK_Return);
  __CONVERT(0x10 /* VKEY_SHIFT */, KEY_LEFTSHIFT, 0x700E1, XK_Shift_L);
  __CONVERT(0x11 /* VKEY_CONTROL */, KEY_LEFTCTRL, 0x700E0, XK_Control_L);
  __CONVERT(0x12 /* VKEY_MENU */, KEY_LEFTALT, UNKNOWN, XK_Alt_L);
  __CONVERT(0x13 /* VKEY_PAUSE */, KEY_PAUSE, UNKNOWN, XK_Pause);
  __CONVERT(0x14 /* VKEY_CAPITAL */, KEY_CAPSLOCK, 0x70039, XK_Caps_Lock);
  __CONVERT(0x15 /* VKEY_KANA */, KEY_KATAKANAHIRAGANA, UNKNOWN, XK_Kana_Shift);
  __CONVERT(0x16 /* VKEY_HANGUL */, KEY_HANGEUL, UNKNOWN, XK_Hangul);
  __CONVERT(0x17 /* VKEY_JUNJA */, KEY_HANJA, UNKNOWN, XK_Hangul_Jeonja);
  __CONVERT(0x19 /* VKEY_KANJI */, KEY_KATAKANA, UNKNOWN, XK_Kanji);
  __CONVERT(0x1B /* VKEY_ESCAPE */, KEY_ESC, 0x70029, XK_Escape);
  __CONVERT(0x20 /* VKEY_SPACE */, KEY_SPACE, 0x7002C, XK_space);
  __CONVERT(0x21 /* VKEY_PRIOR */, KEY_PAGEUP, 0x7004B, XK_Page_Up);
  __CONVERT(0x22 /* VKEY_NEXT */, KEY_PAGEDOWN, 0x7004E, XK_Page_Down);
  __CONVERT(0x23 /* VKEY_END */, KEY_END, 0x7004D, XK_End);
  __CONVERT(0x24 /* VKEY_HOME */, KEY_HOME, 0x7004A, XK_Home);
  __CONVERT(0x25 /* VKEY_LEFT */, KEY_LEFT, 0x70050, XK_Left);
  __CONVERT(0x26 /* VKEY_UP */, KEY_UP, 0x70052, XK_Up);
  __CONVERT(0x27 /* VKEY_RIGHT */, KEY_RIGHT, 0x7004F, XK_Right);
  __CONVERT(0x28 /* VKEY_DOWN */, KEY_DOWN, 0x70051, XK_Down);
  __CONVERT(0x29 /* VKEY_SELECT */, KEY_SELECT, UNKNOWN, XK_Select);
  __CONVERT(0x2A /* VKEY_PRINT */, KEY_PRINT, UNKNOWN, XK_Print);
  __CONVERT(0x2C /* VKEY_SNAPSHOT */, KEY_SYSRQ, 0x70046, XK_Sys_Req);
  __CONVERT(0x2D /* VKEY_INSERT */, KEY_INSERT, 0x70049, XK_Insert);
  __CONVERT(0x2E /* VKEY_DELETE */, KEY_DELETE, 0x7004C, XK_Delete);
  __CONVERT(0x2F /* VKEY_HELP */, KEY_HELP, UNKNOWN, XK_Help);
  __CONVERT(0x30 /* VKEY_0 */, KEY_0, 0x70027, XK_0);
  __CONVERT(0x31 /* VKEY_1 */, KEY_1, 0x7001E, XK_1);
  __CONVERT(0x32 /* VKEY_2 */, KEY_2, 0x7001F, XK_2);
  __CONVERT(0x33 /* VKEY_3 */, KEY_3, 0x70020, XK_3);
  __CONVERT(0x34 /* VKEY_4 */, KEY_4, 0x70021, XK_4);
  __CONVERT(0x35 /* VKEY_5 */, KEY_5, 0x70022, XK_5);
  __CONVERT(0x36 /* VKEY_6 */, KEY_6, 0x70023, XK_6);
  __CONVERT(0x37 /* VKEY_7 */, KEY_7, 0x70024, XK_7);
  __CONVERT(0x38 /* VKEY_8 */, KEY_8, 0x70025, XK_8);
  __CONVERT(0x39 /* VKEY_9 */, KEY_9, 0x70026, XK_9);
  __CONVERT(0x41 /* VKEY_A */, KEY_A, 0x70004, XK_A);
  __CONVERT(0x42 /* VKEY_B */, KEY_B, 0x70005, XK_B);
  __CONVERT(0x43 /* VKEY_C */, KEY_C, 0x70006, XK_C);
  __CONVERT(0x44 /* VKEY_D */, KEY_D, 0x70007, XK_D);
  __CONVERT(0x45 /* VKEY_E */, KEY_E, 0x70008, XK_E);
  __CONVERT(0x46 /* VKEY_F */, KEY_F, 0x70009, XK_F);
  __CONVERT(0x47 /* VKEY_G */, KEY_G, 0x7000A, XK_G);
  __CONVERT(0x48 /* VKEY_H */, KEY_H, 0x7000B, XK_H);
  __CONVERT(0x49 /* VKEY_I */, KEY_I, 0x7000C, XK_I);
  __CONVERT(0x4A /* VKEY_J */, KEY_J, 0x7000D, XK_J);
  __CONVERT(0x4B /* VKEY_K */, KEY_K, 0x7000E, XK_K);
  __CONVERT(0x4C /* VKEY_L */, KEY_L, 0x7000F, XK_L);
  __CONVERT(0x4D /* VKEY_M */, KEY_M, 0x70010, XK_M);
  __CONVERT(0x4E /* VKEY_N */, KEY_N, 0x70011, XK_N);
  __CONVERT(0x4F /* VKEY_O */, KEY_O, 0x70012, XK_O);
  __CONVERT(0x50 /* VKEY_P */, KEY_P, 0x70013, XK_P);
  __CONVERT(0x51 /* VKEY_Q */, KEY_Q, 0x70014, XK_Q);
  __CONVERT(0x52 /* VKEY_R */, KEY_R, 0x70015, XK_R);
  __CONVERT(0x53 /* VKEY_S */, KEY_S, 0x70016, XK_S);
  __CONVERT(0x54 /* VKEY_T */, KEY_T, 0x70017, XK_T);
  __CONVERT(0x55 /* VKEY_U */, KEY_U, 0x70018, XK_U);
  __CONVERT(0x56 /* VKEY_V */, KEY_V, 0x70019, XK_V);
  __CONVERT(0x57 /* VKEY_W */, KEY_W, 0x7001A, XK_W);
  __CONVERT(0x58 /* VKEY_X */, KEY_X, 0x7001B, XK_X);
  __CONVERT(0x59 /* VKEY_Y */, KEY_Y, 0x7001C, XK_Y);
  __CONVERT(0x5A /* VKEY_Z */, KEY_Z, 0x7001D, XK_Z);
  __CONVERT(0x5B /* VKEY_LWIN */, KEY_LEFTMETA, 0x700E3, XK_Meta_L);
  __CONVERT(0x5C /* VKEY_RWIN */, KEY_RIGHTMETA, 0x700E7, XK_Meta_R);
  __CONVERT(0x5F /* VKEY_SLEEP */, KEY_SLEEP, UNKNOWN, UNKNOWN);
  __CONVERT(0x60 /* VKEY_NUMPAD0 */, KEY_KP0, 0x70062, XK_KP_0);
  __CONVERT(0x61 /* VKEY_NUMPAD1 */, KEY_KP1, 0x70059, XK_KP_1);
  __CONVERT(0x62 /* VKEY_NUMPAD2 */, KEY_KP2, 0x7005A, XK_KP_2);
  __CONVERT(0x63 /* VKEY_NUMPAD3 */, KEY_KP3, 0x7005B, XK_KP_3);
  __CONVERT(0x64 /* VKEY_NUMPAD4 */, KEY_KP4, 0x7005C, XK_KP_4);
  __CONVERT(0x65 /* VKEY_NUMPAD5 */, KEY_KP5, 0x7005D, XK_KP_5);
  __CONVERT(0x66 /* VKEY_NUMPAD6 */, KEY_KP6, 0x7005E, XK_KP_6);
  __CONVERT(0x67 /* VKEY_NUMPAD7 */, KEY_KP7, 0x7005F, XK_KP_7);
  __CONVERT(0x68 /* VKEY_NUMPAD8 */, KEY_KP8, 0x70060, XK_KP_8);
  __CONVERT(0x69 /* VKEY_NUMPAD9 */, KEY_KP9, 0x70061, XK_KP_9);
  __CONVERT(0x6A /* VKEY_MULTIPLY */, KEY_KPASTERISK, 0x70055, XK_KP_Multiply);
  __CONVERT(0x6B /* VKEY_ADD */, KEY_KPPLUS, 0x70057, XK_KP_Add);
  __CONVERT(0x6C /* VKEY_SEPARATOR */, KEY_KPCOMMA, UNKNOWN, XK_KP_Separator);
  __CONVERT(0x6D /* VKEY_SUBTRACT */, KEY_KPMINUS, 0x70056, XK_KP_Subtract);
  __CONVERT(0x6E /* VKEY_DECIMAL */, KEY_KPDOT, 0x70063, XK_KP_Decimal);
  __CONVERT(0x6F /* VKEY_DIVIDE */, KEY_KPSLASH, 0x70054, XK_KP_Divide);
  __CONVERT(0x70 /* VKEY_F1 */, KEY_F1, 0x70046, XK_F1);
  __CONVERT(0x71 /* VKEY_F2 */, KEY_F2, 0x70047, XK_F2);
  __CONVERT(0x72 /* VKEY_F3 */, KEY_F3, 0x70048, XK_F3);
  __CONVERT(0x73 /* VKEY_F4 */, KEY_F4, 0x70049, XK_F4);
  __CONVERT(0x74 /* VKEY_F5 */, KEY_F5, 0x7004a, XK_F5);
  __CONVERT(0x75 /* VKEY_F6 */, KEY_F6, 0x7004b, XK_F6);
  __CONVERT(0x76 /* VKEY_F7 */, KEY_F7, 0x7004c, XK_F7);
  __CONVERT(0x77 /* VKEY_F8 */, KEY_F8, 0x7004d, XK_F8);
  __CONVERT(0x78 /* VKEY_F9 */, KEY_F9, 0x7004e, XK_F9);
  __CONVERT(0x79 /* VKEY_F10 */, KEY_F10, 0x70044, XK_F10);
  __CONVERT(0x7A /* VKEY_F11 */, KEY_F11, 0x70044, XK_F11);
  __CONVERT(0x7B /* VKEY_F12 */, KEY_F12, 0x70045, XK_F12);
  __CONVERT(0x7C /* VKEY_F13 */, KEY_F13, 0x7003a, XK_F13);
  __CONVERT(0x7D /* VKEY_F14 */, KEY_F14, 0x7003b, XK_F14);
  __CONVERT(0x7E /* VKEY_F15 */, KEY_F15, 0x7003c, XK_F15);
  __CONVERT(0x7F /* VKEY_F16 */, KEY_F16, 0x7003d, XK_F16);
  __CONVERT(0x80 /* VKEY_F17 */, KEY_F17, 0x7003e, XK_F17);
  __CONVERT(0x81 /* VKEY_F18 */, KEY_F18, 0x7003f, XK_F18);
  __CONVERT(0x82 /* VKEY_F19 */, KEY_F19, 0x70040, XK_F19);
  __CONVERT(0x83 /* VKEY_F20 */, KEY_F20, 0x70041, XK_F20);
  __CONVERT(0x84 /* VKEY_F21 */, KEY_F21, 0x70042, XK_F21);
  __CONVERT(0x85 /* VKEY_F22 */, KEY_F22, 0x70043, XK_F22);
  __CONVERT(0x86 /* VKEY_F23 */, KEY_F23, 0x70044, XK_F23);
  __CONVERT(0x87 /* VKEY_F24 */, KEY_F24, 0x70045, XK_F24);
  __CONVERT(0x90 /* VKEY_NUMLOCK */, KEY_NUMLOCK, 0x70053, XK_Num_Lock);
  __CONVERT(0x91 /* VKEY_SCROLL */, KEY_SCROLLLOCK, 0x70047, XK_Scroll_Lock);
  __CONVERT(0xA0 /* VKEY_LSHIFT */, KEY_LEFTSHIFT, 0x700E1, XK_Shift_L);
  __CONVERT(0xA1 /* VKEY_RSHIFT */, KEY_RIGHTSHIFT, 0x700E5, XK_Shift_R);
  __CONVERT(0xA2 /* VKEY_LCONTROL */, KEY_LEFTCTRL, 0x700E0, XK_Control_L);
  __CONVERT(0xA3 /* VKEY_RCONTROL */, KEY_RIGHTCTRL, 0x700E4, XK_Control_R);
  __CONVERT(0xA4 /* VKEY_LMENU */, KEY_LEFTALT, 0x7002E, XK_Alt_L);
  __CONVERT(0xA5 /* VKEY_RMENU */, KEY_RIGHTALT, 0x700E6, XK_Alt_R);
  __CONVERT(0xBA /* VKEY_OEM_1 */, KEY_SEMICOLON, 0x70033, XK_semicolon);
  __CONVERT(0xBB /* VKEY_OEM_PLUS */, KEY_EQUAL, 0x7002E, XK_equal);
  __CONVERT(0xBC /* VKEY_OEM_COMMA */, KEY_COMMA, 0x70036, XK_comma);
  __CONVERT(0xBD /* VKEY_OEM_MINUS */, KEY_MINUS, 0x7002D, XK_minus);
  __CONVERT(0xBE /* VKEY_OEM_PERIOD */, KEY_DOT, 0x70037, XK_period);
  __CONVERT(0xBF /* VKEY_OEM_2 */, KEY_SLASH, 0x70038, XK_slash);
  __CONVERT(0xC0 /* VKEY_OEM_3 */, KEY_GRAVE, 0x70035, XK_grave);
  __CONVERT(0xDB /* VKEY_OEM_4 */, KEY_LEFTBRACE, 0x7002F, XK_braceleft);
  __CONVERT(0xDC /* VKEY_OEM_5 */, KEY_BACKSLASH, 0x70031, XK_backslash);
  __CONVERT(0xDD /* VKEY_OEM_6 */, KEY_RIGHTBRACE, 0x70030, XK_braceright);
  __CONVERT(0xDE /* VKEY_OEM_7 */, KEY_APOSTROPHE, 0x70034, XK_apostrophe);
  __CONVERT(0xE2 /* VKEY_NON_US_BACKSLASH */, KEY_102ND, 0x70064, XK_backslash);
#undef __CONVERT
#undef __CONVERT_UNSAFE

  return keycodes;
}
// Compile-time VKEY -> linux/X11 translation table.
static constexpr auto keycodes = init_keycodes();

// Moonlight sends absolute pointer coordinates in this fixed virtual space —
// presumably rescaled to the real display elsewhere; TODO confirm.
constexpr touch_port_t target_touch_port {
  0, 0,
  19200, 12000
};
// Scale both components of a (weak, strong) magnitude pair by an integer.
static std::pair<std::uint32_t, std::uint32_t>
operator*(const std::pair<std::uint32_t, std::uint32_t> &l, int r) {
  auto scaled = l;
  scaled.first *= r;
  scaled.second *= r;
  return scaled;
}
// Divide both components of a (weak, strong) magnitude pair by an integer.
static std::pair<std::uint32_t, std::uint32_t>
operator/(const std::pair<std::uint32_t, std::uint32_t> &l, int r) {
  auto quotient = l;
  quotient.first /= r;
  quotient.second /= r;
  return quotient;
}
// Component-wise accumulate one magnitude pair into another; returns the target.
static std::pair<std::uint32_t, std::uint32_t> &
operator+=(std::pair<std::uint32_t, std::uint32_t> &l, const std::pair<std::uint32_t, std::uint32_t> &r) {
  l.first = l.first + r.first;
  l.second = l.second + r.second;
  return l;
}
// Debug-log the attack/fade envelope of a force-feedback effect.
static inline void
print(const ff_envelope &envelope) {
  BOOST_LOG(debug)
    << "Envelope:"sv << std::endl
    << "  attack_length: " << envelope.attack_length << std::endl
    << "  attack_level: " << envelope.attack_level << std::endl
    << "  fade_length: " << envelope.fade_length << std::endl
    << "  fade_level: " << envelope.fade_level;
}
// Debug-log the replay (duration/delay) parameters of a force-feedback effect.
static inline void
print(const ff_replay &replay) {
  BOOST_LOG(debug)
    << "Replay:"sv << std::endl
    << "  length: "sv << replay.length << std::endl
    << "  delay: "sv << replay.delay;
}
// Debug-log the trigger (button/interval) parameters of a force-feedback effect.
static inline void
print(const ff_trigger &trigger) {
  BOOST_LOG(debug)
    << "Trigger:"sv << std::endl
    << "  button: "sv << trigger.button << std::endl
    << "  interval: "sv << trigger.interval;
}
/**
 * Debug-log a complete force-feedback effect received from the kernel,
 * dispatching on the effect type to print its type-specific parameters.
 *
 * Fix: the FF_PERIODIC branch previously printed the label "FF_CONSTANT:";
 * it now prints "FF_PERIODIC:".
 */
static inline void
print(const ff_effect &effect) {
  BOOST_LOG(debug)
    << std::endl
    << std::endl
    << "Received rumble effect with id: ["sv << effect.id << ']';

  switch (effect.type) {
    case FF_CONSTANT:
      BOOST_LOG(debug)
        << "FF_CONSTANT:"sv << std::endl
        << "  direction: "sv << effect.direction << std::endl
        << "  level: "sv << effect.u.constant.level;

      print(effect.u.constant.envelope);
      break;

    case FF_PERIODIC:
      BOOST_LOG(debug)
        << "FF_PERIODIC:"sv << std::endl
        << "  direction: "sv << effect.direction << std::endl
        << "  waveform: "sv << effect.u.periodic.waveform << std::endl
        << "  period: "sv << effect.u.periodic.period << std::endl
        << "  magnitude: "sv << effect.u.periodic.magnitude << std::endl
        << "  offset: "sv << effect.u.periodic.offset << std::endl
        << "  phase: "sv << effect.u.periodic.phase;

      print(effect.u.periodic.envelope);
      break;

    case FF_RAMP:
      BOOST_LOG(debug)
        << "FF_RAMP:"sv << std::endl
        << "  direction: "sv << effect.direction << std::endl
        << "  start_level:" << effect.u.ramp.start_level << std::endl
        << "  end_level:" << effect.u.ramp.end_level;

      print(effect.u.ramp.envelope);
      break;

    case FF_RUMBLE:
      BOOST_LOG(debug)
        << "FF_RUMBLE:" << std::endl
        << "  direction: "sv << effect.direction << std::endl
        << "  strong_magnitude: " << effect.u.rumble.strong_magnitude << std::endl
        << "  weak_magnitude: " << effect.u.rumble.weak_magnitude;
      break;

    case FF_SPRING:
      BOOST_LOG(debug)
        << "FF_SPRING:" << std::endl
        << "  direction: "sv << effect.direction;
      break;

    case FF_FRICTION:
      BOOST_LOG(debug)
        << "FF_FRICTION:" << std::endl
        << "  direction: "sv << effect.direction;
      break;

    case FF_DAMPER:
      BOOST_LOG(debug)
        << "FF_DAMPER:" << std::endl
        << "  direction: "sv << effect.direction;
      break;

    case FF_INERTIA:
      BOOST_LOG(debug)
        << "FF_INERTIA:" << std::endl
        << "  direction: "sv << effect.direction;
      break;

    case FF_CUSTOM:
      BOOST_LOG(debug)
        << "FF_CUSTOM:" << std::endl
        << "  direction: "sv << effect.direction;
      break;

    default:
      BOOST_LOG(debug)
        << "FF_UNKNOWN:" << std::endl
        << "  direction: "sv << effect.direction;
      break;
  }

  print(effect.replay);
  print(effect.trigger);
}
// Emulate rumble effects
class effect_t {
public:
KITTY_DEFAULT_CONSTR_MOVE(effect_t)
effect_t(std::uint8_t gamepadnr, uinput_t::pointer dev, feedback_queue_t &&q):
gamepadnr { gamepadnr }, dev { dev }, rumble_queue { std::move(q) }, gain { 0xFFFF }, id_to_data {} {}
class data_t {
public:
KITTY_DEFAULT_CONSTR(data_t)
data_t(const ff_effect &effect):
delay { effect.replay.delay },
length { effect.replay.length },
end_point { std::chrono::steady_clock::time_point::min() },
envelope {},
start {},
end {} {
switch (effect.type) {
case FF_CONSTANT:
start.weak = effect.u.constant.level;
start.strong = effect.u.constant.level;
end.weak = effect.u.constant.level;
end.strong = effect.u.constant.level;
envelope = effect.u.constant.envelope;
break;
case FF_PERIODIC:
start.weak = effect.u.periodic.magnitude;
start.strong = effect.u.periodic.magnitude;
end.weak = effect.u.periodic.magnitude;
end.strong = effect.u.periodic.magnitude;
envelope = effect.u.periodic.envelope;
break;
case FF_RAMP:
start.weak = effect.u.ramp.start_level;
start.strong = effect.u.ramp.start_level;
end.weak = effect.u.ramp.end_level;
end.strong = effect.u.ramp.end_level;
envelope = effect.u.ramp.envelope;
break;
case FF_RUMBLE:
start.weak = effect.u.rumble.weak_magnitude;
start.strong = effect.u.rumble.strong_magnitude;
end.weak = effect.u.rumble.weak_magnitude;
end.strong = effect.u.rumble.strong_magnitude;
break;
default:
BOOST_LOG(warning) << "Effect type ["sv << effect.id << "] not implemented"sv;
}
}
std::uint32_t
magnitude(std::chrono::milliseconds time_left, std::uint32_t start, std::uint32_t end) {
auto rel = end - start;
return start + (rel * time_left.count() / length.count());
}
std::pair<std::uint32_t, std::uint32_t>
rumble(std::chrono::steady_clock::time_point tp) {
if (end_point < tp) {
return {};
}
auto time_left =
std::chrono::duration_cast<std::chrono::milliseconds>(
end_point - tp);
// If it needs to be delayed'
if (time_left > length) {
return {};
}
auto t = length - time_left;
auto weak = magnitude(t, start.weak, end.weak);
auto strong = magnitude(t, start.strong, end.strong);
if (t.count() < envelope.attack_length) {
weak = (envelope.attack_level * t.count() + weak * (envelope.attack_length - t.count())) / envelope.attack_length;
strong = (envelope.attack_level * t.count() + strong * (envelope.attack_length - t.count())) / envelope.attack_length;
}
else if (time_left.count() < envelope.fade_length) {
auto dt = (t - length).count() + envelope.fade_length;
weak = (envelope.fade_level * dt + weak * (envelope.fade_length - dt)) / envelope.fade_length;
strong = (envelope.fade_level * dt + strong * (envelope.fade_length - dt)) / envelope.fade_length;
}
return {
weak, strong
};
}
void
activate() {
end_point = std::chrono::steady_clock::now() + delay + length;
}
void
deactivate() {
end_point = std::chrono::steady_clock::time_point::min();
}
  // Wait this long after activation before the effect starts playing
  std::chrono::milliseconds delay;
  // Duration of one playback of the effect
  std::chrono::milliseconds length;
  // Absolute time at which the effect stops; time_point::min() when inactive
  std::chrono::steady_clock::time_point end_point;
  // Attack/fade shaping applied on top of the base magnitudes
  ff_envelope envelope;
  // Magnitudes at the start of the effect/ramp
  struct {
    std::uint32_t weak, strong;
  } start;
  // Magnitudes at the end of the effect/ramp
  struct {
    std::uint32_t weak, strong;
  } end;
};
/**
 * Sum the contribution of every uploaded effect at time `tp`, clamp the
 * totals to the 16-bit range, apply the device gain, and cache the result
 * in `old_rumble` so callers can detect changes.
 */
std::pair<std::uint32_t, std::uint32_t>
rumble(std::chrono::steady_clock::time_point tp) {
  std::pair<std::uint32_t, std::uint32_t> weak_strong {};
  for (auto &[_, data] : id_to_data) {
    // operator+= on pairs is provided elsewhere in the project
    weak_strong += data.rumble(tp);
  }
  weak_strong.first = std::clamp<std::uint32_t>(weak_strong.first, 0, 0xFFFF);
  weak_strong.second = std::clamp<std::uint32_t>(weak_strong.second, 0, 0xFFFF);
  // Scale by FF_GAIN (0..0xFFFF); pair arithmetic operators come from the project
  old_rumble = weak_strong * gain / 0xFFFF;
  return old_rumble;
}
void
upload(const ff_effect &effect) {
print(effect);
auto it = id_to_data.find(effect.id);
if (it == std::end(id_to_data)) {
id_to_data.emplace(effect.id, effect);
return;
}
data_t data { effect };
data.end_point = it->second.end_point;
it->second = data;
}
// Start playback of a previously uploaded effect, if it is known.
void
activate(int id) {
  if (auto it = id_to_data.find(id); it != std::end(id_to_data)) {
    it->second.activate();
  }
}
// Stop playback of a previously uploaded effect, if it is known.
void
deactivate(int id) {
  if (auto it = id_to_data.find(id); it != std::end(id_to_data)) {
    it->second.deactivate();
  }
}
// Forget the effect with the given id (no-op if the id is unknown).
void
erase(int id) {
  id_to_data.erase(id);
  BOOST_LOG(debug) << "Removed rumble effect id ["sv << id << ']';
}
// Client-relative gamepad index for rumble notifications
std::uint8_t gamepadnr;
// Used as ID for adding/removing devices from evdev notifications
uinput_t::pointer dev;
// Queue used to deliver feedback messages back to the client
feedback_queue_t rumble_queue;
// Master gain set via FF_GAIN events (0..0xFFFF)
int gain;
// No need to send rumble data when old values equals the new values
std::pair<std::uint32_t, std::uint32_t> old_rumble;
// Uploaded force-feedback effects, keyed by kernel effect id
std::unordered_map<int, data_t> id_to_data;
};
// Shared state for the force-feedback worker thread.
struct rumble_ctx_t {
  // Worker that polls gamepad fds and forwards feedback to clients
  std::thread rumble_thread;
  // Mailbox used to (un)register gamepad devices with the worker
  safe::queue_t<mail_evdev_t> rumble_queue_queue;
};
// Worker loop that services force-feedback events; defined below.
void
broadcastRumble(safe::queue_t<mail_evdev_t> &ctx);
// Spin up the worker that services force-feedback events for all gamepads.
// Returns 0 (init hook contract used by safe::make_shared below).
int
startRumble(rumble_ctx_t &ctx) {
  std::thread worker { broadcastRumble, std::ref(ctx.rumble_queue_queue) };
  ctx.rumble_thread = std::move(worker);
  return 0;
}
// Signal the worker thread to exit, then block until it has joined.
void
stopRumble(rumble_ctx_t &ctx) {
  ctx.rumble_queue_queue.stop();
  BOOST_LOG(debug) << "Waiting for Gamepad notifications to stop..."sv;
  ctx.rumble_thread.join();
  BOOST_LOG(debug) << "Gamepad notifications stopped"sv;
}
// Shared rumble worker context; startRumble/stopRumble are passed as the
// construction/teardown hooks. NOTE(review): presumably safe::make_shared
// starts lazily on first reference and stops on the last — confirm in safe.h.
static auto notifications = safe::make_shared<rumble_ctx_t>(startRumble, stopRumble);
/**
 * Global input emulation state: uinput device handles, per-gamepad state,
 * and bookkeeping for mouse button routing and scroll accumulation.
 * Each created device gets a symlink under appdata() for discoverability;
 * the clear_*() methods remove the symlink and release the device.
 */
struct input_raw_t {
public:
  // Tear down the relative mouse device and its symlink.
  void
  clear_mouse_rel() {
    std::filesystem::path mouse_path { appdata() / "sunshine_mouse_rel"sv };
    if (std::filesystem::is_symlink(mouse_path)) {
      std::filesystem::remove(mouse_path);
    }
    mouse_rel_input.reset();
  }
  // Tear down the keyboard device and its symlink.
  void
  clear_keyboard() {
    std::filesystem::path key_path { appdata() / "sunshine_keyboard"sv };
    if (std::filesystem::is_symlink(key_path)) {
      std::filesystem::remove(key_path);
    }
    keyboard_input.reset();
  }
  // Tear down the absolute mouse device and its symlink.
  void
  clear_mouse_abs() {
    std::filesystem::path mouse_path { appdata() / "sunshine_mouse_abs"sv };
    if (std::filesystem::is_symlink(mouse_path)) {
      std::filesystem::remove(mouse_path);
    }
    mouse_abs_input.reset();
  }
  // Tear down a single gamepad: unregister it from the rumble worker,
  // remove its symlink, and reset its device/state pair.
  void
  clear_gamepad(int nr) {
    auto &[dev, _] = gamepads[nr];
    if (!dev) {
      return;
    }
    // Remove this gamepad from notifications
    rumble_ctx->rumble_queue_queue.raise(nr, dev.get(), nullptr, pollfd_t {});
    std::stringstream ss;
    ss << "sunshine_gamepad_"sv << nr;
    auto gamepad_path = platf::appdata() / ss.str();
    if (std::filesystem::is_symlink(gamepad_path)) {
      std::filesystem::remove(gamepad_path);
    }
    gamepads[nr] = std::make_pair(uinput_t {}, gamepad_state_t {});
  }
  /**
   * Create the absolute-pointing uinput device and symlink it.
   * @return 0 on success, -1 on failure.
   */
  int
  create_mouse_abs() {
    int err = libevdev_uinput_create_from_device(mouse_abs_dev.get(), LIBEVDEV_UINPUT_OPEN_MANAGED, &mouse_abs_input);
    if (err) {
      BOOST_LOG(error) << "Could not create Sunshine Mouse (Absolute): "sv << strerror(-err);
      return -1;
    }
    std::filesystem::create_symlink(libevdev_uinput_get_devnode(mouse_abs_input.get()), appdata() / "sunshine_mouse_abs"sv);
    return 0;
  }
  /**
   * Create the relative-pointing uinput device and symlink it.
   * @return 0 on success, -1 on failure.
   */
  int
  create_mouse_rel() {
    int err = libevdev_uinput_create_from_device(mouse_rel_dev.get(), LIBEVDEV_UINPUT_OPEN_MANAGED, &mouse_rel_input);
    if (err) {
      BOOST_LOG(error) << "Could not create Sunshine Mouse (Relative): "sv << strerror(-err);
      return -1;
    }
    std::filesystem::create_symlink(libevdev_uinput_get_devnode(mouse_rel_input.get()), appdata() / "sunshine_mouse_rel"sv);
    return 0;
  }
  /**
   * Create the keyboard uinput device and symlink it.
   * @return 0 on success, -1 on failure.
   */
  int
  create_keyboard() {
    int err = libevdev_uinput_create_from_device(keyboard_dev.get(), LIBEVDEV_UINPUT_OPEN_MANAGED, &keyboard_input);
    if (err) {
      BOOST_LOG(error) << "Could not create Sunshine Keyboard: "sv << strerror(-err);
      return -1;
    }
    std::filesystem::create_symlink(libevdev_uinput_get_devnode(keyboard_input.get()), appdata() / "sunshine_keyboard"sv);
    return 0;
  }
  /**
   * Create a gamepad uinput device, register it with the rumble worker
   * (handing over a dup'd fd for polling), and symlink its devnode.
   * @return 0 on success, -1 on failure.
   */
  int
  alloc_gamepad(const gamepad_id_t &id, const gamepad_arrival_t &metadata, feedback_queue_t &&feedback_queue) {
    TUPLE_2D_REF(input, gamepad_state, gamepads[id.globalIndex]);
    int err = libevdev_uinput_create_from_device(gamepad_dev.get(), LIBEVDEV_UINPUT_OPEN_MANAGED, &input);
    gamepad_state = gamepad_state_t {};
    if (err) {
      BOOST_LOG(error) << "Could not create Sunshine Gamepad: "sv << strerror(-err);
      return -1;
    }
    std::stringstream ss;
    ss << "sunshine_gamepad_"sv << id.globalIndex;
    auto gamepad_path = platf::appdata() / ss.str();
    // Replace a stale symlink left over from a previous session
    if (std::filesystem::is_symlink(gamepad_path)) {
      std::filesystem::remove(gamepad_path);
    }
    auto dev_node = libevdev_uinput_get_devnode(input.get());
    // The worker polls a duplicate of the fd so device ownership stays here
    rumble_ctx->rumble_queue_queue.raise(
      id.clientRelativeIndex,
      input.get(),
      std::move(feedback_queue),
      pollfd_t {
        dup(libevdev_uinput_get_fd(input.get())),
        (std::int16_t) POLLIN,
        (std::int16_t) 0,
      });
    std::filesystem::create_symlink(dev_node, gamepad_path);
    return 0;
  }
  // Release every emulated device (and the X11 display, when built with X11).
  void
  clear() {
    clear_keyboard();
    clear_mouse_abs();
    clear_mouse_rel();
    for (int x = 0; x < gamepads.size(); ++x) {
      clear_gamepad(x);
    }
#ifdef SUNSHINE_BUILD_X11
    if (display) {
      x11::CloseDisplay(display);
      display = nullptr;
    }
#endif
  }
  ~input_raw_t() {
    clear();
  }
  // Keeps the shared rumble worker alive while any input context exists
  safe::shared_t<rumble_ctx_t>::ptr_t rumble_ctx;
  // Per-slot gamepad device handle + last known state
  std::vector<std::pair<uinput_t, gamepad_state_t>> gamepads;
  uinput_t mouse_rel_input;
  uinput_t mouse_abs_input;
  uinput_t keyboard_input;
  // Bitmask of buttons currently held on each mouse device (bit = button number)
  uint8_t mouse_rel_buttons_down = 0;
  uint8_t mouse_abs_buttons_down = 0;
  // Device that last emitted motion; button events are routed here
  uinput_t::pointer last_mouse_device_used = nullptr;
  uint8_t *last_mouse_device_buttons_down = nullptr;
  // Template device descriptions used to create the uinput devices above
  evdev_t gamepad_dev;
  evdev_t mouse_rel_dev;
  evdev_t mouse_abs_dev;
  evdev_t keyboard_dev;
  evdev_t touchscreen_dev;
  evdev_t pen_dev;
  // Sub-tick (1/120 notch) scroll remainders carried between calls
  int accumulated_vscroll_delta = 0;
  int accumulated_hscroll_delta = 0;
#ifdef SUNSHINE_BUILD_X11
  // XTest fallback display when uinput devices are unavailable
  Display *display;
#endif
};
/**
 * @brief Poll all registered gamepad fds once and process pending evdev events.
 * @param effects Per-gamepad force-feedback state; kept parallel to `polls`.
 * @param polls Owned pollfd wrappers; kept parallel to `effects`.
 * @param to Poll timeout.
 */
inline void
rumbleIterate(std::vector<effect_t> &effects, std::vector<pollfd_t> &polls, std::chrono::milliseconds to) {
  // Snapshot plain pollfds for the poll(2) call
  std::vector<pollfd> polls_recv;
  polls_recv.reserve(polls.size());
  for (auto &poll : polls) {
    polls_recv.emplace_back(poll.el);
  }
  auto res = poll(polls_recv.data(), polls_recv.size(), to.count());
  // If timed out
  if (!res) {
    return;
  }
  if (res < 0) {
    char err_str[1024];
    BOOST_LOG(error) << "Couldn't poll Gamepad file descriptors: "sv << strerror_r(errno, err_str, 1024);
    return;
  }
  for (int x = 0; x < polls.size(); ++x) {
    auto poll = std::begin(polls) + x;
    auto effect_it = std::begin(effects) + x;
    auto fd = (*poll)->fd;
    // On error, drop this gamepad. polls_recv must be erased in lockstep with
    // polls/effects: otherwise the remaining entries would be checked against
    // the wrong (stale) revents for the rest of this iteration.
    if (polls_recv[x].revents & (POLLHUP | POLLRDHUP | POLLERR)) {
      BOOST_LOG(warning) << "Gamepad ["sv << x << "] file descriptor closed unexpectedly"sv;
      polls.erase(poll);
      effects.erase(effect_it);
      polls_recv.erase(std::begin(polls_recv) + x);
      --x;
      continue;
    }
    if (!(polls_recv[x].revents & POLLIN)) {
      continue;
    }
    input_event events[64];
    // Read all available events
    auto bytes = read(fd, &events, sizeof(events));
    if (bytes < 0) {
      char err_str[1024];
      BOOST_LOG(error) << "Couldn't read evdev input ["sv << errno << "]: "sv << strerror_r(errno, err_str, 1024);
      polls.erase(poll);
      effects.erase(effect_it);
      // Keep polls_recv aligned with polls/effects (see note above)
      polls_recv.erase(std::begin(polls_recv) + x);
      --x;
      continue;
    }
    if (bytes < sizeof(input_event)) {
      BOOST_LOG(warning) << "Reading evdev input: Expected at least "sv << sizeof(input_event) << " bytes, got "sv << bytes << " instead"sv;
      continue;
    }
    auto event_count = bytes / sizeof(input_event);
    for (auto event = events; event != (events + event_count); ++event) {
      switch (event->type) {
        case EV_FF:
          // FF_GAIN is a global gain update; anything else toggles an effect
          if (event->code == FF_GAIN) {
            BOOST_LOG(debug) << "EV_FF: code [FF_GAIN]: value: "sv << event->value << " aka "sv << util::hex(event->value).to_string_view();
            effect_it->gain = std::clamp(event->value, 0, 0xFFFF);
            break;
          }
          BOOST_LOG(debug) << "EV_FF: id ["sv << event->code << "]: value: "sv << event->value << " aka "sv << util::hex(event->value).to_string_view();
          if (event->value) {
            effect_it->activate(event->code);
          }
          else {
            effect_it->deactivate(event->code);
          }
          break;
        case EV_UINPUT:
          switch (event->code) {
            case UI_FF_UPLOAD: {
              uinput_ff_upload upload {};
              // *VERY* important, without this you break
              // the kernel and have to reboot due to dead
              // hanging process
              upload.request_id = event->value;
              ioctl(fd, UI_BEGIN_FF_UPLOAD, &upload);
              auto fg = util::fail_guard([&]() {
                upload.retval = 0;
                ioctl(fd, UI_END_FF_UPLOAD, &upload);
              });
              effect_it->upload(upload.effect);
            } break;
            case UI_FF_ERASE: {
              uinput_ff_erase erase {};
              // *VERY* important, without this you break
              // the kernel and have to reboot due to dead
              // hanging process
              erase.request_id = event->value;
              ioctl(fd, UI_BEGIN_FF_ERASE, &erase);
              auto fg = util::fail_guard([&]() {
                erase.retval = 0;
                ioctl(fd, UI_END_FF_ERASE, &erase);
              });
              effect_it->erase(erase.effect_id);
            } break;
          }
          break;
        default:
          BOOST_LOG(debug)
            << util::hex(event->type).to_string_view() << ": "sv
            << util::hex(event->code).to_string_view() << ": "sv
            << event->value << " aka "sv << util::hex(event->value).to_string_view();
      }
    }
  }
}
/**
 * Worker loop: maintains the set of registered gamepad devices and forwards
 * force-feedback state changes to clients. A queue message whose device is
 * already registered acts as a removal request; otherwise it registers the
 * device (a null rumble_queue for an unknown device is a spurious removal).
 */
void
broadcastRumble(safe::queue_t<mail_evdev_t> &rumble_queue_queue) {
  std::vector<effect_t> effects;
  std::vector<pollfd_t> polls;
  while (rumble_queue_queue.running()) {
    // Drain all pending (un)registration messages first
    while (rumble_queue_queue.peek()) {
      auto dev_rumble_queue = rumble_queue_queue.pop();
      if (!dev_rumble_queue) {
        // rumble_queue_queue is no longer running
        return;
      }
      auto gamepadnr = std::get<0>(*dev_rumble_queue);
      auto dev = std::get<1>(*dev_rumble_queue);
      auto &rumble_queue = std::get<2>(*dev_rumble_queue);
      auto &pollfd = std::get<3>(*dev_rumble_queue);
      {
        auto effect_it = std::find_if(std::begin(effects), std::end(effects), [dev](auto &curr_effect) {
          return dev == curr_effect.dev;
        });
        // Already registered -> this message is a removal request
        if (effect_it != std::end(effects)) {
          auto poll_it = std::begin(polls) + (effect_it - std::begin(effects));
          polls.erase(poll_it);
          effects.erase(effect_it);
          BOOST_LOG(debug) << "Removed Gamepad device from notifications"sv;
          continue;
        }
        // There may be an attempt to remove that which does not exist
        if (!rumble_queue) {
          BOOST_LOG(warning) << "Attempting to remove a gamepad device from notifications that isn't already registered"sv;
          continue;
        }
      }
      polls.emplace_back(std::move(pollfd));
      effects.emplace_back(gamepadnr, dev, std::move(rumble_queue));
      BOOST_LOG(debug) << "Added Gamepad device to notifications"sv;
    }
    if (polls.empty()) {
      // Nothing to poll; idle until devices are registered
      std::this_thread::sleep_for(250ms);
    }
    else {
      rumbleIterate(effects, polls, 100ms);
      auto now = std::chrono::steady_clock::now();
      // Only notify clients when the combined rumble value actually changed
      for (auto &effect : effects) {
        TUPLE_2D(old_weak, old_strong, effect.old_rumble);
        TUPLE_2D(weak, strong, effect.rumble(now));
        if (old_weak != weak || old_strong != strong) {
          BOOST_LOG(debug) << "Sending haptic feedback: lowfreq [0x"sv << util::hex(strong).to_string_view() << "]: highfreq [0x"sv << util::hex(weak).to_string_view() << ']';
          effect.rumble_queue->raise(gamepad_feedback_msg_t::make_rumble(effect.gamepadnr, strong, weak));
        }
      }
    }
  }
}
/**
 * @brief Absolute mouse move via XTest (fallback when no uinput device exists).
 * @param input The input_t instance to use.
 * @param x Absolute x position.
 * @param y Absolute y position.
 * @examples
 * x_abs_mouse(input, 0, 0);
 * @examples_end
 */
static void
x_abs_mouse(input_t &input, float x, float y) {
#ifdef SUNSHINE_BUILD_X11
  auto *raw = (input_raw_t *) input.get();
  if (auto *xdisplay = raw->display) {
    x11::tst::FakeMotionEvent(xdisplay, -1, x, y, CurrentTime);
    x11::Flush(xdisplay);
  }
#endif
}
/**
 * @brief Query the current pointer position (X11 builds only).
 * @param input The input_t instance to use.
 * @return The pointer position, or a default point when it cannot be queried.
 */
util::point_t
get_mouse_loc(input_t &input) {
#ifdef SUNSHINE_BUILD_X11
  Display *xdisplay = ((input_raw_t *) input.get())->display;
  if (!xdisplay) {
    return util::point_t {};
  }
  int root_x, root_y;
  int win_x, win_y;
  unsigned int mask_return;
  Window root_return, child_return;
  Window root = DefaultRootWindow(xdisplay);
  if (!XQueryPointer(xdisplay, root, &root_return, &child_return, &root_x, &root_y, &win_x, &win_y, &mask_return)) {
    BOOST_LOG(debug) << "Unable to query x11 pointer"sv << std::endl;
    return util::point_t {};
  }
  BOOST_LOG(debug)
    << "Pointer is at:"sv << std::endl
    << " x: " << root_x << std::endl
    << " y: " << root_y << std::endl;
  return util::point_t { (double) root_x, (double) root_y };
#else
  BOOST_LOG(debug) << "Unable to query wayland pointer"sv << std::endl;
  return util::point_t {};
#endif
}
/**
 * @brief Absolute mouse move.
 * @param input The input_t instance to use.
 * @param touch_port The touch_port instance to use.
 * @param x Absolute x position.
 * @param y Absolute y position.
 * @examples
 * abs_mouse(input, touch_port, 0, 0);
 * @examples_end
 */
void
abs_mouse(input_t &input, const touch_port_t &touch_port, float x, float y) {
  auto raw = (input_raw_t *) input.get();
  auto dev = raw->mouse_abs_input.get();
  if (!dev) {
    // No uinput absolute device available; fall back to XTest emulation
    x_abs_mouse(input, x, y);
    return;
  }
  // Map the client coordinate space onto the emulated device's axis range
  auto x_scale = (float) target_touch_port.width / (float) touch_port.width;
  auto y_scale = (float) target_touch_port.height / (float) touch_port.height;
  auto scaled_x = (int) std::lround((x + touch_port.offset_x) * x_scale);
  auto scaled_y = (int) std::lround((y + touch_port.offset_y) * y_scale);
  libevdev_uinput_write_event(dev, EV_ABS, ABS_X, scaled_x);
  libevdev_uinput_write_event(dev, EV_ABS, ABS_Y, scaled_y);
  libevdev_uinput_write_event(dev, EV_SYN, SYN_REPORT, 0);
  // Track the device so subsequent button events follow the latest motion
  raw->last_mouse_device_used = dev;
  raw->last_mouse_device_buttons_down = &raw->mouse_abs_buttons_down;
}
/**
 * @brief Relative mouse move via XTest (fallback when no uinput device exists).
 * @param input The input_t instance to use.
 * @param deltaX Relative x position.
 * @param deltaY Relative y position.
 * @examples
 * x_move_mouse(input, 10, 10); // Move mouse 10 pixels down and right
 * @examples_end
 */
static void
x_move_mouse(input_t &input, int deltaX, int deltaY) {
#ifdef SUNSHINE_BUILD_X11
  auto *raw = (input_raw_t *) input.get();
  if (auto *xdisplay = raw->display) {
    x11::tst::FakeRelativeMotionEvent(xdisplay, deltaX, deltaY, CurrentTime);
    x11::Flush(xdisplay);
  }
#endif
}
/**
 * @brief Relative mouse move.
 * @param input The input_t instance to use.
 * @param deltaX Relative x position.
 * @param deltaY Relative y position.
 * @examples
 * move_mouse(input, 10, 10); // Move mouse 10 pixels down and right
 * @examples_end
 */
void
move_mouse(input_t &input, int deltaX, int deltaY) {
  auto raw = (input_raw_t *) input.get();
  auto dev = raw->mouse_rel_input.get();
  if (!dev) {
    // No uinput relative device available; fall back to XTest emulation
    x_move_mouse(input, deltaX, deltaY);
    return;
  }
  // Only emit axes that actually moved
  if (deltaX != 0) {
    libevdev_uinput_write_event(dev, EV_REL, REL_X, deltaX);
  }
  if (deltaY != 0) {
    libevdev_uinput_write_event(dev, EV_REL, REL_Y, deltaY);
  }
  libevdev_uinput_write_event(dev, EV_SYN, SYN_REPORT, 0);
  // Track the device so subsequent button events follow the latest motion
  raw->last_mouse_device_used = dev;
  raw->last_mouse_device_buttons_down = &raw->mouse_rel_buttons_down;
}
/**
 * @brief Mouse button press/release via XTest (fallback path).
 * @param input The input_t instance to use.
 * @param button Which mouse button to emulate.
 * @param release Whether the event was a press (false) or a release (true)
 * @examples
 * x_button_mouse(input, 1, false); // Press left mouse button
 * @examples_end
 */
static void
x_button_mouse(input_t &input, int button, bool release) {
#ifdef SUNSHINE_BUILD_X11
  unsigned int x_button;
  if (button == BUTTON_LEFT) {
    x_button = 1;
  }
  else if (button == BUTTON_MIDDLE) {
    x_button = 2;
  }
  else if (button == BUTTON_RIGHT) {
    x_button = 3;
  }
  else {
    // Button 4 (Moonlight) starts at index 8 (X11)
    x_button = (button - 4) + 8;
  }
  // XTest only supports button indices 1..31
  if (x_button < 1 || x_button > 31) {
    return;
  }
  Display *xdisplay = ((input_raw_t *) input.get())->display;
  if (!xdisplay) {
    return;
  }
  x11::tst::FakeButtonEvent(xdisplay, x_button, !release, CurrentTime);
  x11::Flush(xdisplay);
#endif
}
/**
 * @brief Mouse button press/release.
 * @param input The input_t instance to use.
 * @param button Which mouse button to emulate.
 * @param release Whether the event was a press (false) or a release (true)
 * @examples
 * button_mouse(input, 1, false); // Press left mouse button
 * @examples_end
 */
void
button_mouse(input_t &input, int button, bool release) {
  auto raw = (input_raw_t *) input.get();
  // We mimic the Linux vmmouse driver here and prefer to send buttons
  // on the last mouse device we used. However, we make an exception
  // if it's a release event and the button is down on the other device.
  uinput_t::pointer target_dev = nullptr;
  uint8_t *target_buttons_down = nullptr;
  const uint8_t button_bit = 1 << button;
  if (release) {
    // Prefer to send the release on the mouse with the button down
    if (raw->mouse_rel_buttons_down & button_bit) {
      target_dev = raw->mouse_rel_input.get();
      target_buttons_down = &raw->mouse_rel_buttons_down;
    }
    else if (raw->mouse_abs_buttons_down & button_bit) {
      target_dev = raw->mouse_abs_input.get();
      target_buttons_down = &raw->mouse_abs_buttons_down;
    }
  }
  if (!target_dev) {
    if (raw->last_mouse_device_used) {
      // Prefer to use the last device we sent motion
      target_dev = raw->last_mouse_device_used;
      target_buttons_down = raw->last_mouse_device_buttons_down;
    }
    else {
      // Send on the relative device if we have no preference yet
      target_dev = raw->mouse_rel_input.get();
      target_buttons_down = &raw->mouse_rel_buttons_down;
    }
  }
  if (!target_dev) {
    // No uinput mouse at all; fall back to XTest emulation
    x_button_mouse(input, button, release);
    return;
  }
  int btn_type;
  int scan;
  switch (button) {
    case 1:
      btn_type = BTN_LEFT;
      scan = 90001;
      break;
    case 2:
      btn_type = BTN_MIDDLE;
      scan = 90003;
      break;
    case 3:
      btn_type = BTN_RIGHT;
      scan = 90002;
      break;
    case 4:
      btn_type = BTN_SIDE;
      scan = 90004;
      break;
    default:
      btn_type = BTN_EXTRA;
      scan = 90005;
      break;
  }
  libevdev_uinput_write_event(target_dev, EV_MSC, MSC_SCAN, scan);
  libevdev_uinput_write_event(target_dev, EV_KEY, btn_type, release ? 0 : 1);
  libevdev_uinput_write_event(target_dev, EV_SYN, SYN_REPORT, 0);
  // Keep the per-device held-button bitmask in sync
  if (release) {
    *target_buttons_down &= ~button_bit;
  }
  else {
    *target_buttons_down |= button_bit;
  }
}
/**
 * @brief Mouse scroll via XTest (fallback path), one click per notch.
 * @param input The input_t instance to use.
 * @param distance How far to scroll.
 * @param button_pos Which mouse button to emulate for positive scroll.
 * @param button_neg Which mouse button to emulate for negative scroll.
 * @examples
 * x_scroll(input, 10, 4, 5);
 * @examples_end
 */
static void
x_scroll(input_t &input, int distance, int button_pos, int button_neg) {
#ifdef SUNSHINE_BUILD_X11
  Display *xdisplay = ((input_raw_t *) input.get())->display;
  if (!xdisplay) {
    return;
  }
  const int button = distance > 0 ? button_pos : button_neg;
  const int clicks = std::abs(distance);
  // Each wheel notch is a press/release pair in X11
  for (int i = 0; i < clicks; ++i) {
    x11::tst::FakeButtonEvent(xdisplay, button, true, CurrentTime);
    x11::tst::FakeButtonEvent(xdisplay, button, false, CurrentTime);
  }
  x11::Flush(xdisplay);
#endif
}
/**
 * @brief Vertical mouse scroll.
 * @param input The input_t instance to use.
 * @param high_res_distance How far to scroll (120 units per wheel notch).
 * @examples
 * scroll(input, 1200);
 * @examples_end
 */
void
scroll(input_t &input, int high_res_distance) {
  auto raw = (input_raw_t *) input.get();
  // Accumulate sub-notch movement; one notch is 120 high-res units
  raw->accumulated_vscroll_delta += high_res_distance;
  const int full_ticks = raw->accumulated_vscroll_delta / 120;
  // We mimic the Linux vmmouse driver and always send scroll events
  // via the relative pointing device for Xorg compatibility.
  auto dev = raw->mouse_rel_input.get();
  if (!dev) {
    if (full_ticks) {
      x_scroll(input, full_ticks, 4, 5);
    }
  }
  else {
    if (full_ticks) {
      libevdev_uinput_write_event(dev, EV_REL, REL_WHEEL, full_ticks);
    }
    libevdev_uinput_write_event(dev, EV_REL, REL_WHEEL_HI_RES, high_res_distance);
    libevdev_uinput_write_event(dev, EV_SYN, SYN_REPORT, 0);
  }
  raw->accumulated_vscroll_delta -= full_ticks * 120;
}
/**
 * @brief Horizontal mouse scroll.
 * @param input The input_t instance to use.
 * @param high_res_distance How far to scroll (120 units per wheel notch).
 * @examples
 * hscroll(input, 1200);
 * @examples_end
 */
void
hscroll(input_t &input, int high_res_distance) {
  auto raw = (input_raw_t *) input.get();
  // Accumulate sub-notch movement; one notch is 120 high-res units
  raw->accumulated_hscroll_delta += high_res_distance;
  const int full_ticks = raw->accumulated_hscroll_delta / 120;
  // We mimic the Linux vmmouse driver and always send scroll events
  // via the relative pointing device for Xorg compatibility.
  auto dev = raw->mouse_rel_input.get();
  if (!dev) {
    if (full_ticks) {
      x_scroll(input, full_ticks, 6, 7);
    }
  }
  else {
    if (full_ticks) {
      libevdev_uinput_write_event(dev, EV_REL, REL_HWHEEL, full_ticks);
    }
    libevdev_uinput_write_event(dev, EV_REL, REL_HWHEEL_HI_RES, high_res_distance);
    libevdev_uinput_write_event(dev, EV_SYN, SYN_REPORT, 0);
  }
  raw->accumulated_hscroll_delta -= full_ticks * 120;
}
/**
 * @brief Look up the evdev/X11 key mapping for a Moonlight key code.
 * @param modcode The Moonlight key code.
 * @return The mapping entry, or a default-constructed keycode_t when unmapped.
 */
static keycode_t
keysym(std::uint16_t modcode) {
  // Bounds check must be '<': the original 'modcode <= keycodes.size()'
  // read one element past the end of the table when modcode == size().
  if (modcode < keycodes.size()) {
    return keycodes[modcode];
  }
  return {};
}
/**
 * @brief Keyboard emulation via XTest (fallback path).
 * @param input The input_t instance to use.
 * @param modcode The moonlight key code.
 * @param release Whether the event was a press (false) or a release (true).
 * @param flags SS_KBE_FLAG_* values (unused on this path).
 * @examples
 * x_keyboard(input, 0x5A, false, 0); // Press Z
 * @examples_end
 */
static void
x_keyboard(input_t &input, uint16_t modcode, bool release, uint8_t flags) {
#ifdef SUNSHINE_BUILD_X11
  const auto mapped = keysym(modcode);
  if (mapped.keysym == UNKNOWN) {
    return;
  }
  Display *xdisplay = ((input_raw_t *) input.get())->display;
  if (!xdisplay) {
    return;
  }
  const auto x_keycode = XKeysymToKeycode(xdisplay, mapped.keysym);
  if (!x_keycode) {
    return;
  }
  x11::tst::FakeKeyEvent(xdisplay, x_keycode, !release, CurrentTime);
  x11::Flush(xdisplay);
#endif
}
/**
 * @brief Keyboard emulation.
 * @param input The input_t instance to use.
 * @param modcode The moonlight key code.
 * @param release Whether the event was a press (false) or a release (true).
 * @param flags SS_KBE_FLAG_* values.
 * @examples
 * keyboard_update(input, 0x5A, false, 0); // Press Z
 * @examples_end
 */
void
keyboard_update(input_t &input, uint16_t modcode, bool release, uint8_t flags) {
  auto dev = ((input_raw_t *) input.get())->keyboard_input.get();
  if (!dev) {
    // No uinput keyboard available; fall back to XTest emulation
    x_keyboard(input, modcode, release, flags);
    return;
  }
  const auto mapped = keysym(modcode);
  if (mapped.keycode == UNKNOWN) {
    return;
  }
  // Emit the hardware scancode first when the table provides one
  if (mapped.scancode != UNKNOWN) {
    libevdev_uinput_write_event(dev, EV_MSC, MSC_SCAN, mapped.scancode);
  }
  libevdev_uinput_write_event(dev, EV_KEY, mapped.keycode, release ? 0 : 1);
  libevdev_uinput_write_event(dev, EV_SYN, SYN_REPORT, 0);
}
// Emit a single key event (press by default, event_code = 0 for release)
// followed by a SYN_REPORT on the given keyboard device.
void
keyboard_ev(libevdev_uinput *keyboard, int linux_code, int event_code = 1) {
  libevdev_uinput_write_event(keyboard, EV_KEY, linux_code, event_code);
  libevdev_uinput_write_event(keyboard, EV_SYN, SYN_REPORT, 0);
}
/**
 * Takes an UTF-32 encoded string and returns a hex string representation of the bytes (uppercase)
 *
 * ex: ['👱'] = "1F471" // see UTF encoding at https://www.compart.com/en/unicode/U+1F471
 *
 * adapted from: https://stackoverflow.com/a/7639754
 */
std::string
to_hex(const std::basic_string<char32_t> &str) {
  std::stringstream stream;
  // std::uppercase makes the hex digits A-F directly, replacing the
  // post-hoc std::transform/::toupper pass over the result.
  stream << std::hex << std::uppercase << std::setfill('0');
  for (const auto code_point : str) {
    stream << static_cast<uint_least32_t>(code_point);
  }
  return stream.str();
}
/**
 * Here we receive a single UTF-8 encoded char at a time,
 * the trick is to convert it to UTF-32 then send CTRL+SHIFT+U+{HEXCODE} in order to produce any
 * unicode character, see: https://en.wikipedia.org/wiki/Unicode_input
 *
 * ex:
 * - when receiving UTF-8 [0xF0 0x9F 0x91 0xB1] (which is '👱')
 * - we'll convert it to UTF-32 [0x1F471]
 * - then type: CTRL+SHIFT+U+1F471
 * see the conversion at: https://www.compart.com/en/unicode/U+1F471
 */
void
unicode(input_t &input, char *utf8, int size) {
  auto keyboard = ((input_raw_t *) input.get())->keyboard_input.get();
  if (!keyboard) {
    return;
  }
  // Decode the incoming UTF-8 bytes, widen to UTF-32, then render the
  // code point(s) as an uppercase hex string
  auto wide = boost::locale::conv::to_utf<wchar_t>(utf8, utf8 + size, "UTF-8");
  auto utf32 = boost::locale::conv::utf_to_utf<char32_t>(wide);
  auto hex_str = to_hex(utf32);
  BOOST_LOG(debug) << "Unicode, typing U+"sv << hex_str;
  // IBus-style entry: hold CTRL+SHIFT, tap U...
  keyboard_ev(keyboard, KEY_LEFTCTRL, 1);
  keyboard_ev(keyboard, KEY_LEFTSHIFT, 1);
  keyboard_ev(keyboard, KEY_U, 1);
  keyboard_ev(keyboard, KEY_U, 0);
  // ...type each hex digit...
  for (auto &digit : hex_str) {
    auto key_name = "KEY_"s + digit;
    auto code = libevdev_event_code_from_name(EV_KEY, key_name.c_str());
    if (code == -1) {
      BOOST_LOG(warning) << "Unicode, unable to find keycode for: "sv << digit;
    }
    else {
      keyboard_ev(keyboard, code, 1);
      keyboard_ev(keyboard, code, 0);
    }
  }
  // ...then release the modifiers to commit the character
  keyboard_ev(keyboard, KEY_LEFTSHIFT, 0);
  keyboard_ev(keyboard, KEY_LEFTCTRL, 0);
}
// Public entry point: forward gamepad creation to the global input context.
// Returns 0 on success, -1 on failure (see input_raw_t::alloc_gamepad).
int
alloc_gamepad(input_t &input, const gamepad_id_t &id, const gamepad_arrival_t &metadata, feedback_queue_t feedback_queue) {
  return ((input_raw_t *) input.get())->alloc_gamepad(id, metadata, std::move(feedback_queue));
}
// Public entry point: tear down the gamepad in slot `nr` of the global context.
void
free_gamepad(input_t &input, int nr) {
  ((input_raw_t *) input.get())->clear_gamepad(nr);
}
/**
 * @brief Forward a new gamepad state to the emulated device.
 * Emits events only for the buttons/axes that changed since the last
 * reported state, then commits everything with one SYN_REPORT.
 * @param input The input_t instance to use.
 * @param nr Global gamepad slot index.
 * @param gamepad_state The new gamepad state.
 */
void
gamepad_update(input_t &input, int nr, const gamepad_state_t &gamepad_state) {
  auto &[uinput, prev_state] = ((input_raw_t *) input.get())->gamepads[nr];
  auto dev = uinput.get();
  const auto changed = gamepad_state.buttonFlags ^ prev_state.buttonFlags;
  const auto pressed = gamepad_state.buttonFlags;
  if (changed) {
    // D-pad is reported as hat axes: up/left == -1, down/right == 1, centered == 0
    if (changed & (DPAD_UP | DPAD_DOWN)) {
      const int hat_y = (pressed & DPAD_UP) ? -1 : ((pressed & DPAD_DOWN) ? 1 : 0);
      libevdev_uinput_write_event(dev, EV_ABS, ABS_HAT0Y, hat_y);
    }
    if (changed & (DPAD_LEFT | DPAD_RIGHT)) {
      const int hat_x = (pressed & DPAD_LEFT) ? -1 : ((pressed & DPAD_RIGHT) ? 1 : 0);
      libevdev_uinput_write_event(dev, EV_ABS, ABS_HAT0X, hat_x);
    }
    if (changed & START) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_START, (pressed & START) ? 1 : 0);
    }
    if (changed & BACK) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_SELECT, (pressed & BACK) ? 1 : 0);
    }
    if (changed & LEFT_STICK) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_THUMBL, (pressed & LEFT_STICK) ? 1 : 0);
    }
    if (changed & RIGHT_STICK) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_THUMBR, (pressed & RIGHT_STICK) ? 1 : 0);
    }
    if (changed & LEFT_BUTTON) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_TL, (pressed & LEFT_BUTTON) ? 1 : 0);
    }
    if (changed & RIGHT_BUTTON) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_TR, (pressed & RIGHT_BUTTON) ? 1 : 0);
    }
    if (changed & (HOME | MISC_BUTTON)) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_MODE, (pressed & (HOME | MISC_BUTTON)) ? 1 : 0);
    }
    if (changed & A) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_SOUTH, (pressed & A) ? 1 : 0);
    }
    if (changed & B) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_EAST, (pressed & B) ? 1 : 0);
    }
    if (changed & X) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_NORTH, (pressed & X) ? 1 : 0);
    }
    if (changed & Y) {
      libevdev_uinput_write_event(dev, EV_KEY, BTN_WEST, (pressed & Y) ? 1 : 0);
    }
  }
  // Triggers and sticks: emit only on change. Y axes are inverted to match
  // evdev's orientation.
  if (prev_state.lt != gamepad_state.lt) {
    libevdev_uinput_write_event(dev, EV_ABS, ABS_Z, gamepad_state.lt);
  }
  if (prev_state.rt != gamepad_state.rt) {
    libevdev_uinput_write_event(dev, EV_ABS, ABS_RZ, gamepad_state.rt);
  }
  if (prev_state.lsX != gamepad_state.lsX) {
    libevdev_uinput_write_event(dev, EV_ABS, ABS_X, gamepad_state.lsX);
  }
  if (prev_state.lsY != gamepad_state.lsY) {
    libevdev_uinput_write_event(dev, EV_ABS, ABS_Y, -gamepad_state.lsY);
  }
  if (prev_state.rsX != gamepad_state.rsX) {
    libevdev_uinput_write_event(dev, EV_ABS, ABS_RX, gamepad_state.rsX);
  }
  if (prev_state.rsY != gamepad_state.rsY) {
    libevdev_uinput_write_event(dev, EV_ABS, ABS_RY, -gamepad_state.rsY);
  }
  prev_state = gamepad_state;
  libevdev_uinput_write_event(dev, EV_SYN, SYN_REPORT, 0);
}
// Maximum number of simultaneously tracked touch contacts
constexpr auto NUM_TOUCH_SLOTS = 10;
// Axis maxima for the emulated pen/touch devices — presumably matched by
// the device setup elsewhere; confirm against touchscreen_dev/pen_dev config
constexpr auto DISTANCE_MAX = 1024;
constexpr auto PRESSURE_MAX = 4096;
// ABS_MT_TRACKING_ID value meaning "slot unused"
constexpr int64_t INVALID_TRACKING_ID = -1;
// HACK: Contacts with very small pressure values get discarded by libinput, but
// we assume that the client has already excluded such errant touches. We enforce
// a minimum pressure value to prevent our touches from being discarded.
constexpr auto PRESSURE_MIN = 0.10f;
struct client_input_raw_t: public client_input_t {
client_input_raw_t(input_t &input) {
global = (input_raw_t *) input.get();
touch_slots.fill(INVALID_TRACKING_ID);
}
input_raw_t *global;
// Device state and handles for pen and touch input must be stored in the per-client
// input context, because each connected client may be sending their own independent
// pen/touch events. To maintain separation, we expose separate pen and touch devices
// for each client.
// Mapping of ABS_MT_SLOT/ABS_MT_TRACKING_ID -> pointerId
std::array<int64_t, NUM_TOUCH_SLOTS> touch_slots;
uinput_t touch_input;
uinput_t pen_input;
};
/**
 * @brief Allocates a context to store per-client input data.
 * @param input The global input context.
 * @return A unique pointer to a per-client input data context.
 */
std::unique_ptr<client_input_t>
allocate_client_input_context(input_t &input) {
  // Pen/touch uinput devices inside are created lazily on first use
  return std::make_unique<client_input_raw_t>(input);
}
/**
 * @brief Retrieves the slot index for a given pointer ID.
 * @param input The client-specific input context.
 * @param pointerId The pointer ID sent from the client.
 * @return Slot index or -1 if not found.
 */
int
slot_index_by_pointer_id(client_input_raw_t *input, uint32_t pointerId) {
  const auto &slots = input->touch_slots;
  auto match = std::find(slots.cbegin(), slots.cend(), (int64_t) pointerId);
  if (match == slots.cend()) {
    return -1;
  }
  return (int) std::distance(slots.cbegin(), match);
}
/**
 * @brief Reserves a slot index for a new pointer ID.
 * @param input The client-specific input context.
 * @param pointerId The pointer ID sent from the client.
 * @return Slot index or -1 if no unallocated slots remain.
 */
int
allocate_slot_index_for_pointer_id(client_input_raw_t *input, uint32_t pointerId) {
  // Reuse an existing slot if the pointer is somehow already tracked
  int existing = slot_index_by_pointer_id(input, pointerId);
  if (existing >= 0) {
    BOOST_LOG(warning) << "Pointer "sv << pointerId << " already down. Did the client drop an up/cancel event?"sv;
    return existing;
  }
  // Claim the first free slot
  auto &slots = input->touch_slots;
  auto free_slot = std::find(slots.begin(), slots.end(), INVALID_TRACKING_ID);
  if (free_slot == slots.end()) {
    return -1;
  }
  *free_slot = pointerId;
  return (int) std::distance(slots.begin(), free_slot);
}
/**
* @brief Sends a touch event to the OS.
* @param input The client-specific input context.
* @param touch_port The current viewport for translating to screen coordinates.
* @param touch The touch event.
*/
void
touch_update(client_input_t *input, const touch_port_t &touch_port, const touch_input_t &touch) {
  auto raw = (client_input_raw_t *) input;

  // Lazily create the virtual touchscreen so no uinput node exists until the
  // client actually sends touch input.
  if (!raw->touch_input) {
    int err = libevdev_uinput_create_from_device(raw->global->touchscreen_dev.get(), LIBEVDEV_UINPUT_OPEN_MANAGED, &raw->touch_input);
    if (err) {
      BOOST_LOG(error) << "Could not create Sunshine Touchscreen: "sv << strerror(-err);
      return;
    }
  }

  auto touch_input = raw->touch_input.get();

  // Clamp so an active contact never reports exactly zero pressure
  float pressure = std::max(PRESSURE_MIN, touch.pressureOrDistance);

  if (touch.eventType == LI_TOUCH_EVENT_CANCEL_ALL) {
    // Invalidate every MT slot, then release BTN_TOUCH and the single-touch
    // pressure axis for consumers that don't speak the MT protocol.
    for (int i = 0; i < raw->touch_slots.size(); i++) {
      libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_SLOT, i);
      libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_TRACKING_ID, -1);
    }
    raw->touch_slots.fill(INVALID_TRACKING_ID);
    libevdev_uinput_write_event(touch_input, EV_KEY, BTN_TOUCH, 0);
    libevdev_uinput_write_event(touch_input, EV_ABS, ABS_PRESSURE, 0);
    libevdev_uinput_write_event(touch_input, EV_SYN, SYN_REPORT, 0);
    return;
  }

  if (touch.eventType == LI_TOUCH_EVENT_CANCEL) {
    // Stop tracking this slot
    auto slot_index = slot_index_by_pointer_id(raw, touch.pointerId);
    if (slot_index >= 0) {
      libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_SLOT, slot_index);
      libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_TRACKING_ID, -1);
      raw->touch_slots[slot_index] = INVALID_TRACKING_ID;

      // Raise BTN_TOUCH if no touches are down
      if (std::all_of(raw->touch_slots.cbegin(), raw->touch_slots.cend(),
            [](uint64_t pointer_id) { return pointer_id == INVALID_TRACKING_ID; })) {
        libevdev_uinput_write_event(touch_input, EV_KEY, BTN_TOUCH, 0);

        // This may have been the final slot down which was also being emulated
        // through the single-touch axes. Reset ABS_PRESSURE to ensure code that
        // uses ABS_PRESSURE instead of BTN_TOUCH will work properly.
        libevdev_uinput_write_event(touch_input, EV_ABS, ABS_PRESSURE, 0);
      }

      // Flush the cancellation now. evdev buffers events until SYN_REPORT;
      // previously this branch emitted none, so the slot release was not
      // delivered to userspace until the next touch event arrived.
      libevdev_uinput_write_event(touch_input, EV_SYN, SYN_REPORT, 0);
    }
  }
  else if (touch.eventType == LI_TOUCH_EVENT_DOWN ||
           touch.eventType == LI_TOUCH_EVENT_MOVE ||
           touch.eventType == LI_TOUCH_EVENT_UP) {
    int slot_index;
    if (touch.eventType == LI_TOUCH_EVENT_DOWN) {
      // Allocate a new slot for this new touch
      slot_index = allocate_slot_index_for_pointer_id(raw, touch.pointerId);
      if (slot_index < 0) {
        // Recover from slot exhaustion by cancelling everything first
        BOOST_LOG(error) << "No unused pointer entries! Cancelling all active touches!"sv;
        for (int i = 0; i < raw->touch_slots.size(); i++) {
          libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_SLOT, i);
          libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_TRACKING_ID, -1);
        }
        raw->touch_slots.fill(INVALID_TRACKING_ID);
        libevdev_uinput_write_event(touch_input, EV_KEY, BTN_TOUCH, 0);
        libevdev_uinput_write_event(touch_input, EV_ABS, ABS_PRESSURE, 0);
        libevdev_uinput_write_event(touch_input, EV_SYN, SYN_REPORT, 0);

        // All slots are clear, so this should never fail on the second try
        slot_index = allocate_slot_index_for_pointer_id(raw, touch.pointerId);
        assert(slot_index >= 0);
      }
    }
    else {
      // Lookup the slot of the previous touch with this pointer ID
      slot_index = slot_index_by_pointer_id(raw, touch.pointerId);
      if (slot_index < 0) {
        BOOST_LOG(warning) << "Pointer "sv << touch.pointerId << " is not down. Did the client drop a down event?"sv;
        return;
      }
    }

    libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_SLOT, slot_index);
    if (touch.eventType == LI_TOUCH_EVENT_UP) {
      // Stop tracking this touch
      libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_TRACKING_ID, -1);
      raw->touch_slots[slot_index] = INVALID_TRACKING_ID;

      // Raise BTN_TOUCH if no touches are down
      if (std::all_of(raw->touch_slots.cbegin(), raw->touch_slots.cend(),
            [](uint64_t pointer_id) { return pointer_id == INVALID_TRACKING_ID; })) {
        libevdev_uinput_write_event(touch_input, EV_KEY, BTN_TOUCH, 0);

        // This may have been the final slot down which was also being emulated
        // through the single-touch axes. Reset ABS_PRESSURE to ensure code that
        // uses ABS_PRESSURE instead of BTN_TOUCH will work properly.
        libevdev_uinput_write_event(touch_input, EV_ABS, ABS_PRESSURE, 0);
      }
    }
    else {
      // Translate normalized client coordinates into target screen coordinates
      float x = touch.x * touch_port.width;
      float y = touch.y * touch_port.height;
      auto scaled_x = (int) std::lround((x + touch_port.offset_x) * ((float) target_touch_port.width / (float) touch_port.width));
      auto scaled_y = (int) std::lround((y + touch_port.offset_y) * ((float) target_touch_port.height / (float) touch_port.height));

      libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_TRACKING_ID, slot_index);
      libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_POSITION_X, scaled_x);
      libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_POSITION_Y, scaled_y);
      if (touch.pressureOrDistance) {
        libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_PRESSURE, PRESSURE_MAX * pressure);
      }
      else if (touch.eventType == LI_TOUCH_EVENT_DOWN) {
        // Always report some moderate pressure value when down
        libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_PRESSURE, PRESSURE_MAX / 2);
      }

      if (touch.rotation != LI_ROT_UNKNOWN) {
        // Convert our 0..360 range to -90..90 relative to Y axis
        int adjusted_angle = touch.rotation;
        if (touch.rotation > 90 && touch.rotation < 270) {
          // Lower hemisphere
          adjusted_angle = 180 - adjusted_angle;
        }

        // Wrap the value if it's out of range
        if (adjusted_angle > 90) {
          adjusted_angle -= 360;
        }
        else if (adjusted_angle < -90) {
          adjusted_angle += 360;
        }
        libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_ORIENTATION, adjusted_angle);
      }

      if (touch.contactAreaMajor) {
        // Contact area comes from the input core scaled to the provided touch_port,
        // however we need it rescaled to target_touch_port instead.
        auto target_scaled_contact_area = input::scale_client_contact_area(
          { touch.contactAreaMajor * 65535.f, touch.contactAreaMinor * 65535.f },
          touch.rotation,
          { target_touch_port.width / (touch_port.width * 65535.f),
            target_touch_port.height / (touch_port.height * 65535.f) });

        libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_TOUCH_MAJOR, target_scaled_contact_area.first);

        // scale_client_contact_area() will treat the contact area as circular (major == minor)
        // if the minor axis wasn't specified, so we unconditionally report ABS_MT_TOUCH_MINOR.
        libevdev_uinput_write_event(touch_input, EV_ABS, ABS_MT_TOUCH_MINOR, target_scaled_contact_area.second);
      }

      // If this slot is the first active one, send our data through the single touch axes as well
      for (int i = 0; i <= slot_index; i++) {
        if (raw->touch_slots[i] != INVALID_TRACKING_ID) {
          if (i == slot_index) {
            libevdev_uinput_write_event(touch_input, EV_ABS, ABS_X, scaled_x);
            libevdev_uinput_write_event(touch_input, EV_ABS, ABS_Y, scaled_y);
            if (touch.pressureOrDistance) {
              libevdev_uinput_write_event(touch_input, EV_ABS, ABS_PRESSURE, PRESSURE_MAX * pressure);
            }
            else if (touch.eventType == LI_TOUCH_EVENT_DOWN) {
              libevdev_uinput_write_event(touch_input, EV_ABS, ABS_PRESSURE, PRESSURE_MAX / 2);
            }
          }
          break;
        }
      }
    }

    libevdev_uinput_write_event(touch_input, EV_SYN, SYN_REPORT, 0);
  }
}
/**
* @brief Sends a pen event to the OS.
* @param input The client-specific input context.
* @param touch_port The current viewport for translating to screen coordinates.
* @param pen The pen event.
*/
void
pen_update(client_input_t *input, const touch_port_t &touch_port, const pen_input_t &pen) {
  auto raw = (client_input_raw_t *) input;

  // Lazily create the virtual pen device on first use
  if (!raw->pen_input) {
    int err = libevdev_uinput_create_from_device(raw->global->pen_dev.get(), LIBEVDEV_UINPUT_OPEN_MANAGED, &raw->pen_input);
    if (err) {
      BOOST_LOG(error) << "Could not create Sunshine Pen: "sv << strerror(-err);
      return;
    }
  }

  auto pen_input = raw->pen_input.get();

  // Translate normalized client coordinates into target screen coordinates
  float x = pen.x * touch_port.width;
  float y = pen.y * touch_port.height;
  // Clamp so an active contact never reports exactly zero pressure
  float pressure = std::max(PRESSURE_MIN, pen.pressureOrDistance);
  auto scaled_x = (int) std::lround((x + touch_port.offset_x) * ((float) target_touch_port.width / (float) touch_port.width));
  auto scaled_y = (int) std::lround((y + touch_port.offset_y) * ((float) target_touch_port.height / (float) touch_port.height));

  // First, process location updates for applicable events
  switch (pen.eventType) {
    case LI_TOUCH_EVENT_HOVER:
      // Hovering: position is valid, pressure must be zero
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_X, scaled_x);
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_Y, scaled_y);
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_PRESSURE, 0);
      if (pen.pressureOrDistance) {
        // In hover events, pressureOrDistance carries hover distance instead
        libevdev_uinput_write_event(pen_input, EV_ABS, ABS_DISTANCE, DISTANCE_MAX * pen.pressureOrDistance);
      }
      else {
        // Always report some moderate distance value when hovering to ensure hovering
        // can be detected properly by code that uses ABS_DISTANCE.
        libevdev_uinput_write_event(pen_input, EV_ABS, ABS_DISTANCE, DISTANCE_MAX / 2);
      }
      break;
    case LI_TOUCH_EVENT_DOWN:
      // Contact begins: zero distance, non-zero pressure
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_X, scaled_x);
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_Y, scaled_y);
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_DISTANCE, 0);
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_PRESSURE, PRESSURE_MAX * pressure);
      break;
    case LI_TOUCH_EVENT_UP:
      // Contact ends: pressure returns to zero at the final position
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_X, scaled_x);
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_Y, scaled_y);
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_PRESSURE, 0);
      break;
    case LI_TOUCH_EVENT_MOVE:
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_X, scaled_x);
      libevdev_uinput_write_event(pen_input, EV_ABS, ABS_Y, scaled_y);

      // Update the pressure value if it's present, otherwise leave the default/previous value alone
      if (pen.pressureOrDistance) {
        libevdev_uinput_write_event(pen_input, EV_ABS, ABS_PRESSURE, PRESSURE_MAX * pressure);
      }
      break;
  }

  if (pen.contactAreaMajor) {
    // Contact area comes from the input core scaled to the provided touch_port,
    // however we need it rescaled to target_touch_port instead.
    auto target_scaled_contact_area = input::scale_client_contact_area(
      { pen.contactAreaMajor * 65535.f, pen.contactAreaMinor * 65535.f },
      pen.rotation,
      { target_touch_port.width / (touch_port.width * 65535.f),
        target_touch_port.height / (touch_port.height * 65535.f) });

    // ABS_TOOL_WIDTH assumes a circular tool, so we just report the major axis
    libevdev_uinput_write_event(pen_input, EV_ABS, ABS_TOOL_WIDTH, target_scaled_contact_area.first);
  }

  // We require rotation and tilt to perform the conversion to X and Y tilt angles
  if (pen.tilt != LI_TILT_UNKNOWN && pen.rotation != LI_ROT_UNKNOWN) {
    auto rotation_rads = pen.rotation * (M_PI / 180.f);
    auto tilt_rads = pen.tilt * (M_PI / 180.f);
    // Project the tilt vector onto the XY plane (r) and the Z axis (z)
    auto r = std::sin(tilt_rads);
    auto z = std::cos(tilt_rads);

    // Convert polar coordinates into X and Y tilt angles
    libevdev_uinput_write_event(pen_input, EV_ABS, ABS_TILT_X, std::atan2(std::sin(-rotation_rads) * r, z) * 180.f / M_PI);
    libevdev_uinput_write_event(pen_input, EV_ABS, ABS_TILT_Y, std::atan2(std::cos(-rotation_rads) * r, z) * 180.f / M_PI);
  }

  // Don't update tool type if we're cancelling or ending a touch/hover
  if (pen.eventType != LI_TOUCH_EVENT_CANCEL &&
      pen.eventType != LI_TOUCH_EVENT_CANCEL_ALL &&
      pen.eventType != LI_TOUCH_EVENT_HOVER_LEAVE &&
      pen.eventType != LI_TOUCH_EVENT_UP) {
    // Update the tool type if it is known
    switch (pen.toolType) {
      default:
        // We need to have _some_ tool type set, otherwise there's no way to know a tool is in
        // range when hovering. If we don't know the type of tool, let's assume it's a pen.
        if (pen.eventType != LI_TOUCH_EVENT_DOWN && pen.eventType != LI_TOUCH_EVENT_HOVER) {
          break;
        }
        // fall-through
      case LI_TOOL_TYPE_PEN:
        // Only one BTN_TOOL_* may be active at a time, so clear the other first
        libevdev_uinput_write_event(pen_input, EV_KEY, BTN_TOOL_RUBBER, 0);
        libevdev_uinput_write_event(pen_input, EV_KEY, BTN_TOOL_PEN, 1);
        break;
      case LI_TOOL_TYPE_ERASER:
        libevdev_uinput_write_event(pen_input, EV_KEY, BTN_TOOL_PEN, 0);
        libevdev_uinput_write_event(pen_input, EV_KEY, BTN_TOOL_RUBBER, 1);
        break;
    }
  }

  // Next, process touch state changes
  switch (pen.eventType) {
    case LI_TOUCH_EVENT_CANCEL:
    case LI_TOUCH_EVENT_CANCEL_ALL:
    case LI_TOUCH_EVENT_HOVER_LEAVE:
    case LI_TOUCH_EVENT_UP:
      libevdev_uinput_write_event(pen_input, EV_KEY, BTN_TOUCH, 0);

      // Leaving hover range is detected by all BTN_TOOL_* being cleared
      libevdev_uinput_write_event(pen_input, EV_KEY, BTN_TOOL_PEN, 0);
      libevdev_uinput_write_event(pen_input, EV_KEY, BTN_TOOL_RUBBER, 0);
      break;
    case LI_TOUCH_EVENT_DOWN:
      libevdev_uinput_write_event(pen_input, EV_KEY, BTN_TOUCH, 1);
      break;
  }

  // Finally, process pen buttons and flush everything in one report
  libevdev_uinput_write_event(pen_input, EV_KEY, BTN_STYLUS, !!(pen.penButtons & LI_PEN_BUTTON_PRIMARY));
  libevdev_uinput_write_event(pen_input, EV_KEY, BTN_STYLUS2, !!(pen.penButtons & LI_PEN_BUTTON_SECONDARY));
  libevdev_uinput_write_event(pen_input, EV_KEY, BTN_STYLUS3, !!(pen.penButtons & LI_PEN_BUTTON_TERTIARY));
  libevdev_uinput_write_event(pen_input, EV_SYN, SYN_REPORT, 0);
}
/**
* @brief Sends a gamepad touch event to the OS.
* @param input The global input context.
* @param touch The touch event.
*/
void
gamepad_touch(input_t &input, const gamepad_touch_t &touch) {
  // No-op: controller touchpad passthrough (platform_caps::controller_touch)
  // is not implemented on this platform.
}
/**
* @brief Sends a gamepad motion event to the OS.
* @param input The global input context.
* @param motion The motion event.
*/
void
gamepad_motion(input_t &input, const gamepad_motion_t &motion) {
  // No-op: gamepad motion sensor passthrough is not implemented on this platform.
}
/**
* @brief Sends a gamepad battery event to the OS.
* @param input The global input context.
* @param battery The battery event.
*/
void
gamepad_battery(input_t &input, const gamepad_battery_t &battery) {
  // No-op: gamepad battery state passthrough is not implemented on this platform.
}
/**
* @brief Initialize a new keyboard and return it.
* @examples
* auto my_keyboard = keyboard();
* @examples_end
*/
evdev_t
keyboard() {
  evdev_t dev { libevdev_new() };
  auto *d = dev.get();

  // Device identity reported to the kernel
  libevdev_set_name(d, "Keyboard passthrough");
  libevdev_set_uniq(d, "Sunshine Keyboard");
  libevdev_set_id_vendor(d, 0xBEEF);
  libevdev_set_id_product(d, 0xDEAD);
  libevdev_set_id_bustype(d, 0x3);
  libevdev_set_id_version(d, 0x111);

  // Advertise every key Sunshine knows how to map
  libevdev_enable_event_type(d, EV_KEY);
  for (const auto &entry : keycodes) {
    libevdev_enable_event_code(d, EV_KEY, entry.keycode, nullptr);
  }

  // Raw scancode reporting alongside key events
  libevdev_enable_event_type(d, EV_MSC);
  libevdev_enable_event_code(d, EV_MSC, MSC_SCAN, nullptr);

  return dev;
}
/**
* @brief Initialize a new `uinput` virtual relative mouse and return it.
* @examples
* auto my_mouse = mouse_rel();
* @examples_end
*/
evdev_t
mouse_rel() {
  evdev_t dev { libevdev_new() };
  auto *d = dev.get();

  // Impersonate a real Logitech wireless mouse (VID 0x46D / PID 0x4038)
  libevdev_set_name(d, "Logitech Wireless Mouse PID:4038");
  libevdev_set_uniq(d, "Sunshine Mouse (Rel)");
  libevdev_set_id_vendor(d, 0x46D);
  libevdev_set_id_product(d, 0x4038);
  libevdev_set_id_bustype(d, 0x3);
  libevdev_set_id_version(d, 0x111);

  // Buttons: the named BTN_* set plus the raw codes 280-287 that follow
  // BTN_TASK in the keycode space
  libevdev_enable_event_type(d, EV_KEY);
  for (const auto code : { BTN_LEFT, BTN_RIGHT, BTN_MIDDLE, BTN_SIDE, BTN_EXTRA,
         BTN_FORWARD, BTN_BACK, BTN_TASK,
         280, 281, 282, 283, 284, 285, 286, 287 }) {
    libevdev_enable_event_code(d, EV_KEY, code, nullptr);
  }

  // Relative motion plus low- and high-resolution scroll wheels on both axes
  libevdev_enable_event_type(d, EV_REL);
  for (const auto axis : { REL_X, REL_Y, REL_WHEEL, REL_WHEEL_HI_RES, REL_HWHEEL, REL_HWHEEL_HI_RES }) {
    libevdev_enable_event_code(d, EV_REL, axis, nullptr);
  }

  // Raw scancode reporting
  libevdev_enable_event_type(d, EV_MSC);
  libevdev_enable_event_code(d, EV_MSC, MSC_SCAN, nullptr);

  return dev;
}
/**
* @brief Initialize a new `uinput` virtual absolute mouse and return it.
* @examples
* auto my_mouse = mouse_abs();
* @examples_end
*/
evdev_t
mouse_abs() {
  evdev_t dev { libevdev_new() };
  auto *d = dev.get();

  // Device identity reported to the kernel
  libevdev_set_name(d, "Mouse passthrough");
  libevdev_set_uniq(d, "Sunshine Mouse (Abs)");
  libevdev_set_id_vendor(d, 0xBEEF);
  libevdev_set_id_product(d, 0xDEAD);
  libevdev_set_id_bustype(d, 0x3);
  libevdev_set_id_version(d, 0x111);

  // Absolute coordinates map directly onto the screen
  libevdev_enable_property(d, INPUT_PROP_DIRECT);

  libevdev_enable_event_type(d, EV_KEY);
  for (const auto btn : { BTN_LEFT, BTN_RIGHT, BTN_MIDDLE, BTN_SIDE, BTN_EXTRA }) {
    libevdev_enable_event_code(d, EV_KEY, btn, nullptr);
  }

  // Raw scancode reporting
  libevdev_enable_event_type(d, EV_MSC);
  libevdev_enable_event_code(d, EV_MSC, MSC_SCAN, nullptr);

  // input_absinfo fields: {value, minimum, maximum, fuzz, flat, resolution}
  input_absinfo abs_x { 0, 0, target_touch_port.width, 1, 0, 28 };
  input_absinfo abs_y { 0, 0, target_touch_port.height, 1, 0, 28 };
  libevdev_enable_event_type(d, EV_ABS);
  libevdev_enable_event_code(d, EV_ABS, ABS_X, &abs_x);
  libevdev_enable_event_code(d, EV_ABS, ABS_Y, &abs_y);

  return dev;
}
/**
* @brief Initialize a new `uinput` virtual touchscreen and return it.
* @examples
* auto my_touchscreen = touchscreen();
* @examples_end
*/
evdev_t
touchscreen() {
  evdev_t dev { libevdev_new() };
  auto *d = dev.get();

  // Device identity reported to the kernel
  libevdev_set_name(d, "Touch passthrough");
  libevdev_set_uniq(d, "Sunshine Touchscreen");
  libevdev_set_id_vendor(d, 0xBEEF);
  libevdev_set_id_product(d, 0xDEAD);
  libevdev_set_id_bustype(d, 0x3);
  libevdev_set_id_version(d, 0x111);

  // Contacts map directly onto the screen surface
  libevdev_enable_property(d, INPUT_PROP_DIRECT);

  constexpr auto RESOLUTION = 28;

  // input_absinfo fields are {value, minimum, maximum, fuzz, flat, resolution};
  // value and flat are always 0 for the axes below.
  const auto make_abs = [](__s32 min, __s32 max, __s32 fuzz, __s32 res) {
    return input_absinfo { 0, min, max, fuzz, 0, res };
  };

  input_absinfo abs_slot = make_abs(0, NUM_TOUCH_SLOTS - 1, 0, 0);
  input_absinfo abs_tracking_id = make_abs(0, NUM_TOUCH_SLOTS - 1, 0, 0);
  input_absinfo abs_x = make_abs(0, target_touch_port.width, 1, RESOLUTION);
  input_absinfo abs_y = make_abs(0, target_touch_port.height, 1, RESOLUTION);
  input_absinfo abs_pressure = make_abs(0, PRESSURE_MAX, 0, 0);

  // Degrees of a half revolution
  input_absinfo abs_orientation = make_abs(-90, 90, 0, 0);

  // Contact area can span up to the full screen diagonal
  input_absinfo abs_contact_area = make_abs(
    0,
    (__s32) std::sqrt(std::pow(target_touch_port.width, 2) + std::pow(target_touch_port.height, 2)),
    1,
    RESOLUTION);

  libevdev_enable_event_type(d, EV_ABS);
  libevdev_enable_event_code(d, EV_ABS, ABS_X, &abs_x);
  libevdev_enable_event_code(d, EV_ABS, ABS_Y, &abs_y);
  libevdev_enable_event_code(d, EV_ABS, ABS_PRESSURE, &abs_pressure);
  libevdev_enable_event_code(d, EV_ABS, ABS_MT_SLOT, &abs_slot);
  libevdev_enable_event_code(d, EV_ABS, ABS_MT_TRACKING_ID, &abs_tracking_id);
  libevdev_enable_event_code(d, EV_ABS, ABS_MT_POSITION_X, &abs_x);
  libevdev_enable_event_code(d, EV_ABS, ABS_MT_POSITION_Y, &abs_y);
  libevdev_enable_event_code(d, EV_ABS, ABS_MT_PRESSURE, &abs_pressure);
  libevdev_enable_event_code(d, EV_ABS, ABS_MT_ORIENTATION, &abs_orientation);
  libevdev_enable_event_code(d, EV_ABS, ABS_MT_TOUCH_MAJOR, &abs_contact_area);
  libevdev_enable_event_code(d, EV_ABS, ABS_MT_TOUCH_MINOR, &abs_contact_area);

  libevdev_enable_event_type(d, EV_KEY);
  libevdev_enable_event_code(d, EV_KEY, BTN_TOUCH, nullptr);

  return dev;
}
/**
* @brief Initialize a new `uinput` virtual pen pad and return it.
* @examples
* auto my_penpad = penpad();
* @examples_end
*/
evdev_t
penpad() {
  evdev_t dev { libevdev_new() };
  auto *d = dev.get();

  // Device identity reported to the kernel
  libevdev_set_name(d, "Pen passthrough");
  libevdev_set_uniq(d, "Sunshine Pen");
  libevdev_set_id_vendor(d, 0xBEEF);
  libevdev_set_id_product(d, 0xDEAD);
  libevdev_set_id_bustype(d, 0x3);
  libevdev_set_id_version(d, 0x111);

  // The pen maps directly onto the screen surface
  libevdev_enable_property(d, INPUT_PROP_DIRECT);

  constexpr auto RESOLUTION = 28;

  // input_absinfo fields are {value, minimum, maximum, fuzz, flat, resolution};
  // value and flat are always 0 for the axes below.
  const auto make_abs = [](__s32 min, __s32 max, __s32 fuzz, __s32 res) {
    return input_absinfo { 0, min, max, fuzz, 0, res };
  };

  input_absinfo abs_x = make_abs(0, target_touch_port.width, 1, RESOLUTION);
  input_absinfo abs_y = make_abs(0, target_touch_port.height, 1, RESOLUTION);
  input_absinfo abs_pressure = make_abs(0, PRESSURE_MAX, 0, 0);
  input_absinfo abs_distance = make_abs(0, DISTANCE_MAX, 0, 0);

  // Degrees of tilt
  input_absinfo abs_tilt = make_abs(-90, 90, 0, 0);

  // Contact area can span up to the full screen diagonal
  input_absinfo abs_contact_area = make_abs(
    0,
    (__s32) std::sqrt(std::pow(target_touch_port.width, 2) + std::pow(target_touch_port.height, 2)),
    1,
    RESOLUTION);

  libevdev_enable_event_type(d, EV_ABS);
  libevdev_enable_event_code(d, EV_ABS, ABS_X, &abs_x);
  libevdev_enable_event_code(d, EV_ABS, ABS_Y, &abs_y);
  libevdev_enable_event_code(d, EV_ABS, ABS_PRESSURE, &abs_pressure);
  libevdev_enable_event_code(d, EV_ABS, ABS_DISTANCE, &abs_distance);
  libevdev_enable_event_code(d, EV_ABS, ABS_TILT_X, &abs_tilt);
  libevdev_enable_event_code(d, EV_ABS, ABS_TILT_Y, &abs_tilt);
  libevdev_enable_event_code(d, EV_ABS, ABS_TOOL_WIDTH, &abs_contact_area);

  // Contact, tool-in-range, and stylus barrel buttons
  libevdev_enable_event_type(d, EV_KEY);
  for (const auto btn : { BTN_TOUCH, BTN_TOOL_PEN, BTN_TOOL_RUBBER, BTN_STYLUS, BTN_STYLUS2, BTN_STYLUS3 }) {
    libevdev_enable_event_code(d, EV_KEY, btn, nullptr);
  }

  return dev;
}
/**
* @brief Initialize a new `uinput` virtual X360 gamepad and return it.
* @examples
* auto my_x360 = x360();
* @examples_end
*/
evdev_t
x360() {
  evdev_t dev { libevdev_new() };
  auto *d = dev.get();

  // input_absinfo fields: {value, minimum, maximum, fuzz, flat, resolution}
  input_absinfo stick { 0, -32768, 32767, 16, 128, 0 };
  input_absinfo trigger { 0, 0, 255, 0, 0, 0 };
  input_absinfo dpad { 0, -1, 1, 0, 0, 0 };

  // Impersonate a wired Xbox 360 pad (VID 0x45E / PID 0x28E)
  libevdev_set_name(d, "Microsoft X-Box 360 pad");
  libevdev_set_uniq(d, "Sunshine Gamepad");
  libevdev_set_id_vendor(d, 0x45E);
  libevdev_set_id_product(d, 0x28E);
  libevdev_set_id_bustype(d, 0x3);
  libevdev_set_id_version(d, 0x110);

  // Face, shoulder, stick-click, and menu buttons
  libevdev_enable_event_type(d, EV_KEY);
  for (const auto btn : { BTN_WEST, BTN_EAST, BTN_NORTH, BTN_SOUTH, BTN_THUMBL, BTN_THUMBR,
         BTN_TR, BTN_TL, BTN_SELECT, BTN_MODE, BTN_START }) {
    libevdev_enable_event_code(d, EV_KEY, btn, nullptr);
  }

  // D-pad (hat), analog triggers, and both analog sticks
  libevdev_enable_event_type(d, EV_ABS);
  libevdev_enable_event_code(d, EV_ABS, ABS_HAT0Y, &dpad);
  libevdev_enable_event_code(d, EV_ABS, ABS_HAT0X, &dpad);
  libevdev_enable_event_code(d, EV_ABS, ABS_Z, &trigger);
  libevdev_enable_event_code(d, EV_ABS, ABS_RZ, &trigger);
  libevdev_enable_event_code(d, EV_ABS, ABS_X, &stick);
  libevdev_enable_event_code(d, EV_ABS, ABS_RX, &stick);
  libevdev_enable_event_code(d, EV_ABS, ABS_Y, &stick);
  libevdev_enable_event_code(d, EV_ABS, ABS_RY, &stick);

  // Force feedback (rumble) effects
  libevdev_enable_event_type(d, EV_FF);
  for (const auto effect : { FF_RUMBLE, FF_CONSTANT, FF_PERIODIC, FF_SINE, FF_RAMP, FF_GAIN }) {
    libevdev_enable_event_code(d, EV_FF, effect, nullptr);
  }

  return dev;
}
/**
* @brief Initialize the input system and return it.
* @examples
* auto my_input = input();
* @examples_end
*/
input_t
input() {
  input_t result { new input_raw_t() };
  auto &gp = *(input_raw_t *) result.get();

  // Consumer handle for gamepad feedback (rumble, etc.) notifications
  gp.rumble_ctx = notifications.ref();

  gp.gamepads.resize(MAX_GAMEPADS);

  // Ensure starting from clean slate
  gp.clear();

  // Build the libevdev device descriptions for every virtual device type
  gp.keyboard_dev = keyboard();
  gp.mouse_rel_dev = mouse_rel();
  gp.mouse_abs_dev = mouse_abs();
  gp.touchscreen_dev = touchscreen();
  gp.pen_dev = penpad();
  gp.gamepad_dev = x360();

  // Create the always-needed uinput nodes now; touch and pen nodes are
  // created lazily on first use (see touch_update()/pen_update())
  gp.create_mouse_rel();
  gp.create_mouse_abs();
  gp.create_keyboard();

  // If we do not have a keyboard or mouse, fall back to XTest
  if (!gp.mouse_rel_input || !gp.mouse_abs_input || !gp.keyboard_input) {
#ifdef SUNSHINE_BUILD_X11
    if (x11::init() || x11::tst::init()) {
      BOOST_LOG(fatal) << "Unable to create virtual input devices or use XTest fallback! Are you a member of the 'input' group?"sv;
    }
    else {
      BOOST_LOG(error) << "Falling back to XTest for virtual input! Are you a member of the 'input' group?"sv;
      x11::InitThreads();
      gp.display = x11::OpenDisplay(NULL);
    }
#else
    BOOST_LOG(fatal) << "Unable to create virtual input devices! Are you a member of the 'input' group?"sv;
#endif
  }
  else {
    // uinput works; get_capabilities() can advertise pen/touch support
    has_uinput = true;
  }

  return result;
}
void
freeInput(void *p) {
auto *input = (input_raw_t *) p;
delete input;
}
std::vector<supported_gamepad_t> &
supported_gamepads(input_t *input) {
  // Only the virtual X360 pad is offered by this backend
  static std::vector<supported_gamepad_t> gamepads {
    { "x360", true, "" },
  };

  return gamepads;
}
/**
* @brief Returns the supported platform capabilities to advertise to the client.
* @return Capability flags.
*/
platform_caps::caps_t
get_capabilities() {
  platform_caps::caps_t capabilities = 0;

  // Pen/touch passthrough can only be advertised when the uinput devices
  // were created successfully and the user hasn't disabled it in config
  const bool pen_touch_available = has_uinput && config::input.native_pen_touch;
  if (pen_touch_available) {
    capabilities |= platform_caps::pen_touch;
  }

  return capabilities;
}
} // namespace platf
| 87,111
|
C++
|
.cpp
| 2,161
| 33.803332
| 177
| 0.617956
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,004
|
inputtino_keyboard.cpp
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_keyboard.cpp
|
/**
* @file src/platform/linux/input/inputtino_keyboard.cpp
* @brief Definitions for inputtino keyboard input handling.
*/
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/config.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include "inputtino_common.h"
#include "inputtino_keyboard.h"
using namespace std::literals;
namespace platf::keyboard {
/**
* Takes an UTF-32 encoded string and returns a hex string representation of the bytes (uppercase)
*
* ex: ['👱'] = "1F471" // see UTF encoding at https://www.compart.com/en/unicode/U+1F471
*
* adapted from: https://stackoverflow.com/a/7639754
*/
std::string
to_hex(const std::basic_string<char32_t> &str) {
  // Stream each UTF-32 code point as hex and concatenate. std::uppercase
  // makes the stream emit 'A'-'F' directly, so no separate case-conversion
  // pass over the result is required.
  std::ostringstream oss;
  oss << std::uppercase << std::hex;
  for (const auto &code_point : str) {
    oss << static_cast<uint32_t>(code_point);
  }
  return oss.str();
}
/**
 * A map of linux scan code -> Moonlight keyboard code
 *
 * The mapped values are Windows virtual-key (VK_*) codes, which is the
 * encoding Moonlight uses on the wire.
 *
 * NOTE(review): KEY_LEFTSHIFT and KEY_LEFTCTRL each appear twice below
 * (0x10/0xA0 and 0x11/0xA2). std::map keeps only the FIRST occurrence from
 * an initializer list, so lookups yield 0x10/0x11. This is harmless for the
 * current caller (unicode()), which only looks up hex-digit keys 0-9/A-F,
 * but worth confirming before reusing this map elsewhere.
 */
static const std::map<short, short> key_mappings = {
  { KEY_BACKSPACE, 0x08 }, { KEY_TAB, 0x09 }, { KEY_ENTER, 0x0D }, { KEY_LEFTSHIFT, 0x10 },
  { KEY_LEFTCTRL, 0x11 }, { KEY_CAPSLOCK, 0x14 }, { KEY_ESC, 0x1B }, { KEY_SPACE, 0x20 },
  { KEY_PAGEUP, 0x21 }, { KEY_PAGEDOWN, 0x22 }, { KEY_END, 0x23 }, { KEY_HOME, 0x24 },
  { KEY_LEFT, 0x25 }, { KEY_UP, 0x26 }, { KEY_RIGHT, 0x27 }, { KEY_DOWN, 0x28 },
  { KEY_SYSRQ, 0x2C }, { KEY_INSERT, 0x2D }, { KEY_DELETE, 0x2E }, { KEY_0, 0x30 },
  { KEY_1, 0x31 }, { KEY_2, 0x32 }, { KEY_3, 0x33 }, { KEY_4, 0x34 },
  { KEY_5, 0x35 }, { KEY_6, 0x36 }, { KEY_7, 0x37 }, { KEY_8, 0x38 },
  { KEY_9, 0x39 }, { KEY_A, 0x41 }, { KEY_B, 0x42 }, { KEY_C, 0x43 },
  { KEY_D, 0x44 }, { KEY_E, 0x45 }, { KEY_F, 0x46 }, { KEY_G, 0x47 },
  { KEY_H, 0x48 }, { KEY_I, 0x49 }, { KEY_J, 0x4A }, { KEY_K, 0x4B },
  { KEY_L, 0x4C }, { KEY_M, 0x4D }, { KEY_N, 0x4E }, { KEY_O, 0x4F },
  { KEY_P, 0x50 }, { KEY_Q, 0x51 }, { KEY_R, 0x52 }, { KEY_S, 0x53 },
  { KEY_T, 0x54 }, { KEY_U, 0x55 }, { KEY_V, 0x56 }, { KEY_W, 0x57 },
  { KEY_X, 0x58 }, { KEY_Y, 0x59 }, { KEY_Z, 0x5A }, { KEY_LEFTMETA, 0x5B },
  { KEY_RIGHTMETA, 0x5C }, { KEY_KP0, 0x60 }, { KEY_KP1, 0x61 }, { KEY_KP2, 0x62 },
  { KEY_KP3, 0x63 }, { KEY_KP4, 0x64 }, { KEY_KP5, 0x65 }, { KEY_KP6, 0x66 },
  { KEY_KP7, 0x67 }, { KEY_KP8, 0x68 }, { KEY_KP9, 0x69 }, { KEY_KPASTERISK, 0x6A },
  { KEY_KPPLUS, 0x6B }, { KEY_KPMINUS, 0x6D }, { KEY_KPDOT, 0x6E }, { KEY_KPSLASH, 0x6F },
  { KEY_F1, 0x70 }, { KEY_F2, 0x71 }, { KEY_F3, 0x72 }, { KEY_F4, 0x73 },
  { KEY_F5, 0x74 }, { KEY_F6, 0x75 }, { KEY_F7, 0x76 }, { KEY_F8, 0x77 },
  { KEY_F9, 0x78 }, { KEY_F10, 0x79 }, { KEY_F11, 0x7A }, { KEY_F12, 0x7B },
  { KEY_NUMLOCK, 0x90 }, { KEY_SCROLLLOCK, 0x91 }, { KEY_LEFTSHIFT, 0xA0 }, { KEY_RIGHTSHIFT, 0xA1 },
  { KEY_LEFTCTRL, 0xA2 }, { KEY_RIGHTCTRL, 0xA3 }, { KEY_LEFTALT, 0xA4 }, { KEY_RIGHTALT, 0xA5 },
  { KEY_SEMICOLON, 0xBA }, { KEY_EQUAL, 0xBB }, { KEY_COMMA, 0xBC }, { KEY_MINUS, 0xBD },
  { KEY_DOT, 0xBE }, { KEY_SLASH, 0xBF }, { KEY_GRAVE, 0xC0 }, { KEY_LEFTBRACE, 0xDB },
  { KEY_BACKSLASH, 0xDC }, { KEY_RIGHTBRACE, 0xDD }, { KEY_APOSTROPHE, 0xDE }, { KEY_102ND, 0xE2 }
};
void
update(input_raw_t *raw, uint16_t modcode, bool release, uint8_t flags) {
  // Press or release a single key on the virtual keyboard. Silently ignored
  // if the keyboard device was never created successfully.
  if (!raw->keyboard) {
    return;
  }

  if (release) {
    (*raw->keyboard).release(modcode);
  }
  else {
    (*raw->keyboard).press(modcode);
  }
}
void
unicode(input_raw_t *raw, char *utf8, int size) {
  if (raw->keyboard) {
    /* Reading input text as UTF-8 */
    auto utf8_str = boost::locale::conv::to_utf<wchar_t>(utf8, utf8 + size, "UTF-8");
    /* Converting to UTF-32 */
    auto utf32_str = boost::locale::conv::utf_to_utf<char32_t>(utf8_str);
    /* To HEX string */
    auto hex_unicode = to_hex(utf32_str);
    BOOST_LOG(debug) << "Unicode, typing U+"sv << hex_unicode;

    /* pressing <CTRL> + <SHIFT> + U
     * NOTE(review): this is the IBus-style unicode hex entry sequence;
     * presumably the active desktop input method must support it — verify
     * on non-IBus environments. */
    (*raw->keyboard).press(0xA2);  // LEFTCTRL
    (*raw->keyboard).press(0xA0);  // LEFTSHIFT
    (*raw->keyboard).press(0x55);  // U
    (*raw->keyboard).release(0x55);  // U

    /* input each HEX character (0-9/A-F) of the code point */
    for (auto &ch : hex_unicode) {
      // Resolve e.g. 'F' -> KEY_F -> the Moonlight/VK code the virtual
      // keyboard expects
      auto key_str = "KEY_"s + ch;
      auto keycode = libevdev_event_code_from_name(EV_KEY, key_str.c_str());
      auto wincode = key_mappings.find(keycode);
      if (keycode == -1 || wincode == key_mappings.end()) {
        BOOST_LOG(warning) << "Unicode, unable to find keycode for: "sv << ch;
      }
      else {
        (*raw->keyboard).press(wincode->second);
        (*raw->keyboard).release(wincode->second);
      }
    }

    /* releasing <SHIFT> and <CTRL> commits the composed character */
    (*raw->keyboard).release(0xA0);  // LEFTSHIFT
    (*raw->keyboard).release(0xA2);  // LEFTCTRL
  }
}
} // namespace platf::keyboard
| 5,022
|
C++
|
.cpp
| 109
| 40.917431
| 103
| 0.577025
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,005
|
inputtino_mouse.cpp
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_mouse.cpp
|
/**
* @file src/platform/linux/input/inputtino_mouse.cpp
* @brief Definitions for inputtino mouse input handling.
*/
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/config.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include "inputtino_common.h"
#include "inputtino_mouse.h"
using namespace std::literals;
namespace platf::mouse {
void
move(input_raw_t *raw, int deltaX, int deltaY) {
  // Relative cursor motion; dropped if the virtual mouse is unavailable
  if (!raw->mouse) {
    return;
  }
  (*raw->mouse).move(deltaX, deltaY);
}
void
move_abs(input_raw_t *raw, const touch_port_t &touch_port, float x, float y) {
  // Absolute cursor positioning, scaled against the viewport dimensions
  if (!raw->mouse) {
    return;
  }
  (*raw->mouse).move_abs(x, y, touch_port.width, touch_port.height);
}
void
button(input_raw_t *raw, int button, bool release) {
  // Map a Moonlight button identifier onto inputtino's button enum and
  // forward the press/release; unknown buttons are logged and dropped.
  if (!raw->mouse) {
    return;
  }

  inputtino::Mouse::MOUSE_BUTTON mapped;
  switch (button) {
    case BUTTON_LEFT:
      mapped = inputtino::Mouse::LEFT;
      break;
    case BUTTON_MIDDLE:
      mapped = inputtino::Mouse::MIDDLE;
      break;
    case BUTTON_RIGHT:
      mapped = inputtino::Mouse::RIGHT;
      break;
    case BUTTON_X1:
      mapped = inputtino::Mouse::SIDE;
      break;
    case BUTTON_X2:
      mapped = inputtino::Mouse::EXTRA;
      break;
    default:
      BOOST_LOG(warning) << "Unknown mouse button: " << button;
      return;
  }

  if (release) {
    (*raw->mouse).release(mapped);
  }
  else {
    (*raw->mouse).press(mapped);
  }
}
void
scroll(input_raw_t *raw, int high_res_distance) {
  // Vertical wheel movement in high-resolution units
  if (!raw->mouse) {
    return;
  }
  (*raw->mouse).vertical_scroll(high_res_distance);
}
void
hscroll(input_raw_t *raw, int high_res_distance) {
  // Horizontal wheel movement in high-resolution units
  if (!raw->mouse) {
    return;
  }
  (*raw->mouse).horizontal_scroll(high_res_distance);
}
util::point_t
get_location(input_raw_t *raw) {
  // inputtino does not currently expose the absolute cursor position
  // (see https://github.com/games-on-whales/inputtino/issues/6), so the
  // origin is reported whether or not the mouse device exists.
  if (raw->mouse) {
    return { 0, 0 };
  }
  return { 0, 0 };
}
} // namespace platf::mouse
| 2,287
|
C++
|
.cpp
| 82
| 22.317073
| 105
| 0.6041
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,006
|
inputtino_gamepad.cpp
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_gamepad.cpp
|
/**
* @file src/platform/linux/input/inputtino_gamepad.cpp
* @brief Definitions for inputtino gamepad input handling.
*/
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/config.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include "inputtino_common.h"
#include "inputtino_gamepad.h"
using namespace std::literals;
namespace platf::gamepad {
  // Availability status for the virtual gamepad backends.
  enum GamepadStatus {
    UHID_NOT_AVAILABLE = 0, ///< UHID is not available
    UINPUT_NOT_AVAILABLE, ///< UINPUT is not available
    XINPUT_NOT_AVAILABLE, ///< XINPUT is not available
    GAMEPAD_STATUS ///< Helper to indicate the number of status
  };
  // Factory for a virtual Xbox One pad; returns an inputtino result that is
  // falsy (with an error message) when device creation fails.
  auto
  create_xbox_one() {
    return inputtino::XboxOneJoypad::create({ .name = "Sunshine X-Box One (virtual) pad",
                                              // https://github.com/torvalds/linux/blob/master/drivers/input/joystick/xpad.c#L147
                                              .vendor_id = 0x045E,
                                              .product_id = 0x02EA,
                                              .version = 0x0408 });
  }
  // Factory for a virtual Nintendo Switch Pro pad.
  auto
  create_switch() {
    return inputtino::SwitchJoypad::create({ .name = "Sunshine Nintendo (virtual) pad",
                                             // https://github.com/torvalds/linux/blob/master/drivers/hid/hid-ids.h#L981
                                             .vendor_id = 0x057e,
                                             .product_id = 0x2009,
                                             .version = 0x8111 });
  }
  // Factory for a virtual DualSense pad.
  auto
  create_ds5() {
    return inputtino::PS5Joypad::create({ .name = "Sunshine DualSense (virtual) pad",
                                          .vendor_id = 0x054C,
                                          .product_id = 0x0CE6,
                                          .version = 0x8111 });
  }
  // Allocate a virtual gamepad for a newly-arrived client controller.
  // The emulated controller type is chosen from (in priority order): the
  // user's explicit config, the client-reported controller type, and the
  // motion/touchpad capability heuristics. Returns 0 on success, -1 on failure.
  int
  alloc(input_raw_t *raw, const gamepad_id_t &id, const gamepad_arrival_t &metadata, feedback_queue_t feedback_queue) {
    ControllerType selectedGamepadType;
    if (config::input.gamepad == "xone"sv) {
      BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be Xbox One controller (manual selection)"sv;
      selectedGamepadType = XboxOneWired;
    }
    else if (config::input.gamepad == "ds5"sv) {
      BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be DualSense 5 controller (manual selection)"sv;
      selectedGamepadType = DualSenseWired;
    }
    else if (config::input.gamepad == "switch"sv) {
      BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be Nintendo Pro controller (manual selection)"sv;
      selectedGamepadType = SwitchProWired;
    }
    else if (metadata.type == LI_CTYPE_XBOX) {
      BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be Xbox One controller (auto-selected by client-reported type)"sv;
      selectedGamepadType = XboxOneWired;
    }
    else if (metadata.type == LI_CTYPE_PS) {
      BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be DualShock 5 controller (auto-selected by client-reported type)"sv;
      selectedGamepadType = DualSenseWired;
    }
    else if (metadata.type == LI_CTYPE_NINTENDO) {
      BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be Nintendo Pro controller (auto-selected by client-reported type)"sv;
      selectedGamepadType = SwitchProWired;
    }
    else if (config::input.motion_as_ds4 && (metadata.capabilities & (LI_CCAP_ACCEL | LI_CCAP_GYRO))) {
      BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be DualShock 5 controller (auto-selected by motion sensor presence)"sv;
      selectedGamepadType = DualSenseWired;
    }
    else if (config::input.touchpad_as_ds4 && (metadata.capabilities & LI_CCAP_TOUCHPAD)) {
      BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be DualShock 5 controller (auto-selected by touchpad presence)"sv;
      selectedGamepadType = DualSenseWired;
    }
    else {
      BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be Xbox One controller (default)"sv;
      selectedGamepadType = XboxOneWired;
    }
    // Warn about client capabilities that will be lost (or missing) with the
    // selected emulation target; only DS5 supports motion/touchpad/LED.
    if (selectedGamepadType == XboxOneWired || selectedGamepadType == SwitchProWired) {
      if (metadata.capabilities & (LI_CCAP_ACCEL | LI_CCAP_GYRO)) {
        BOOST_LOG(warning) << "Gamepad " << id.globalIndex << " has motion sensors, but they are not usable when emulating a joypad different from DS5"sv;
      }
      if (metadata.capabilities & LI_CCAP_TOUCHPAD) {
        BOOST_LOG(warning) << "Gamepad " << id.globalIndex << " has a touchpad, but it is not usable when emulating a joypad different from DS5"sv;
      }
      if (metadata.capabilities & LI_CCAP_RGB_LED) {
        BOOST_LOG(warning) << "Gamepad " << id.globalIndex << " has an RGB LED, but it is not usable when emulating a joypad different from DS5"sv;
      }
    }
    else if (selectedGamepadType == DualSenseWired) {
      if (!(metadata.capabilities & (LI_CCAP_ACCEL | LI_CCAP_GYRO))) {
        BOOST_LOG(warning) << "Gamepad " << id.globalIndex << " is emulating a DualShock 5 controller, but the client gamepad doesn't have motion sensors active"sv;
      }
      if (!(metadata.capabilities & LI_CCAP_TOUCHPAD)) {
        BOOST_LOG(warning) << "Gamepad " << id.globalIndex << " is emulating a DualShock 5 controller, but the client gamepad doesn't have a touchpad"sv;
      }
    }
    auto gamepad = std::make_shared<joypad_state>(joypad_state {});
    // Rumble callback shared by all backends; deduplicates identical events
    // before forwarding them to the client feedback queue.
    auto on_rumble_fn = [feedback_queue, idx = id.clientRelativeIndex, gamepad](int low_freq, int high_freq) {
      // Don't resend duplicate rumble data
      if (gamepad->last_rumble.type == platf::gamepad_feedback_e::rumble && gamepad->last_rumble.data.rumble.lowfreq == low_freq && gamepad->last_rumble.data.rumble.highfreq == high_freq) {
        return;
      }
      gamepad_feedback_msg_t msg = gamepad_feedback_msg_t::make_rumble(idx, low_freq, high_freq);
      feedback_queue->raise(msg);
      gamepad->last_rumble = msg;
    };
    switch (selectedGamepadType) {
      case XboxOneWired: {
        auto xOne = create_xbox_one();
        if (xOne) {
          (*xOne).set_on_rumble(on_rumble_fn);
          gamepad->joypad = std::make_unique<joypads_t>(std::move(*xOne));
          raw->gamepads[id.globalIndex] = std::move(gamepad);
          return 0;
        }
        else {
          BOOST_LOG(warning) << "Unable to create virtual Xbox One controller: " << xOne.getErrorMessage();
          return -1;
        }
      }
      case SwitchProWired: {
        auto switchPro = create_switch();
        if (switchPro) {
          (*switchPro).set_on_rumble(on_rumble_fn);
          gamepad->joypad = std::make_unique<joypads_t>(std::move(*switchPro));
          raw->gamepads[id.globalIndex] = std::move(gamepad);
          return 0;
        }
        else {
          BOOST_LOG(warning) << "Unable to create virtual Switch Pro controller: " << switchPro.getErrorMessage();
          return -1;
        }
      }
      case DualSenseWired: {
        auto ds5 = create_ds5();
        if (ds5) {
          (*ds5).set_on_rumble(on_rumble_fn);
          // LED callback: forwarded to the client, deduplicated like rumble.
          (*ds5).set_on_led([feedback_queue, idx = id.clientRelativeIndex, gamepad](int r, int g, int b) {
            // Don't resend duplicate LED data
            if (gamepad->last_rgb_led.type == platf::gamepad_feedback_e::set_rgb_led && gamepad->last_rgb_led.data.rgb_led.r == r && gamepad->last_rgb_led.data.rgb_led.g == g && gamepad->last_rgb_led.data.rgb_led.b == b) {
              return;
            }
            auto msg = gamepad_feedback_msg_t::make_rgb_led(idx, r, g, b);
            feedback_queue->raise(msg);
            gamepad->last_rgb_led = msg;
          });
          // Activate the motion sensors
          feedback_queue->raise(gamepad_feedback_msg_t::make_motion_event_state(id.clientRelativeIndex, LI_MOTION_TYPE_ACCEL, 100));
          feedback_queue->raise(gamepad_feedback_msg_t::make_motion_event_state(id.clientRelativeIndex, LI_MOTION_TYPE_GYRO, 100));
          gamepad->joypad = std::make_unique<joypads_t>(std::move(*ds5));
          raw->gamepads[id.globalIndex] = std::move(gamepad);
          return 0;
        }
        else {
          BOOST_LOG(warning) << "Unable to create virtual DualShock 5 controller: " << ds5.getErrorMessage();
          return -1;
        }
      }
    }
    return -1;
  }
  // Destroy the virtual gamepad at slot `nr`.
  void
  free(input_raw_t *raw, int nr) {
    // This will call the destructor which in turn will stop the background threads for rumble and LED (and ultimately remove the joypad device)
    raw->gamepads[nr]->joypad.reset();
    raw->gamepads[nr].reset();
  }
  // Apply a full controller state update (buttons, sticks, triggers) to slot `nr`.
  void
  update(input_raw_t *raw, int nr, const gamepad_state_t &gamepad_state) {
    auto gamepad = raw->gamepads[nr];
    if (!gamepad) {
      return;
    }
    // joypads_t is a variant over the backend types; all of them share this interface.
    std::visit([gamepad_state](inputtino::Joypad &gc) {
      gc.set_pressed_buttons(gamepad_state.buttonFlags);
      gc.set_stick(inputtino::Joypad::LS, gamepad_state.lsX, gamepad_state.lsY);
      gc.set_stick(inputtino::Joypad::RS, gamepad_state.rsX, gamepad_state.rsY);
      gc.set_triggers(gamepad_state.lt, gamepad_state.rt);
    },
      *gamepad->joypad);
  }
  // Forward a controller touchpad event; silently ignored for non-PS5 emulation.
  void
  touch(input_raw_t *raw, const gamepad_touch_t &touch) {
    auto gamepad = raw->gamepads[touch.id.globalIndex];
    if (!gamepad) {
      return;
    }
    // Only the PS5 controller supports touch input
    if (std::holds_alternative<inputtino::PS5Joypad>(*gamepad->joypad)) {
      // pressure > 0.5 is treated as finger-down; anything else releases the finger.
      if (touch.pressure > 0.5) {
        std::get<inputtino::PS5Joypad>(*gamepad->joypad).place_finger(touch.pointerId, touch.x * inputtino::PS5Joypad::touchpad_width, touch.y * inputtino::PS5Joypad::touchpad_height);
      }
      else {
        std::get<inputtino::PS5Joypad>(*gamepad->joypad).release_finger(touch.pointerId);
      }
    }
  }
  // Forward an accelerometer/gyroscope sample; silently ignored for non-PS5 emulation.
  void
  motion(input_raw_t *raw, const gamepad_motion_t &motion) {
    auto gamepad = raw->gamepads[motion.id.globalIndex];
    if (!gamepad) {
      return;
    }
    // Only the PS5 controller supports motion
    if (std::holds_alternative<inputtino::PS5Joypad>(*gamepad->joypad)) {
      switch (motion.motionType) {
        case LI_MOTION_TYPE_ACCEL:
          std::get<inputtino::PS5Joypad>(*gamepad->joypad).set_motion(inputtino::PS5Joypad::ACCELERATION, motion.x, motion.y, motion.z);
          break;
        case LI_MOTION_TYPE_GYRO:
          // Client sends degrees/s; inputtino expects radians/s.
          std::get<inputtino::PS5Joypad>(*gamepad->joypad).set_motion(inputtino::PS5Joypad::GYROSCOPE, deg2rad(motion.x), deg2rad(motion.y), deg2rad(motion.z));
          break;
      }
    }
  }
  // Forward a battery status report; silently ignored for non-PS5 emulation
  // or when the state/percentage is unknown.
  void
  battery(input_raw_t *raw, const gamepad_battery_t &battery) {
    auto gamepad = raw->gamepads[battery.id.globalIndex];
    if (!gamepad) {
      return;
    }
    // Only the PS5 controller supports battery reports
    if (std::holds_alternative<inputtino::PS5Joypad>(*gamepad->joypad)) {
      inputtino::PS5Joypad::BATTERY_STATE state;
      switch (battery.state) {
        case LI_BATTERY_STATE_CHARGING:
          state = inputtino::PS5Joypad::BATTERY_CHARGHING;
          break;
        case LI_BATTERY_STATE_DISCHARGING:
          state = inputtino::PS5Joypad::BATTERY_DISCHARGING;
          break;
        case LI_BATTERY_STATE_FULL:
          state = inputtino::PS5Joypad::BATTERY_FULL;
          break;
        case LI_BATTERY_STATE_UNKNOWN:
        case LI_BATTERY_STATE_NOT_PRESENT:
        default:
          return;
      }
      if (battery.percentage != LI_BATTERY_PERCENTAGE_UNKNOWN) {
        std::get<inputtino::PS5Joypad>(*gamepad->joypad).set_battery(state, battery.percentage);
      }
    }
  }
  // Report which gamepad emulation targets are usable on this host. With a
  // null input, returns the static option list without probing devices;
  // otherwise each backend is probed once (results cached in a static vector).
  std::vector<supported_gamepad_t> &
  supported_gamepads(input_t *input) {
    if (!input) {
      static std::vector gps {
        supported_gamepad_t { "auto", true, "" },
        supported_gamepad_t { "xone", false, "" },
        supported_gamepad_t { "ds5", false, "" },
        supported_gamepad_t { "switch", false, "" },
      };
      return gps;
    }
    auto ds5 = create_ds5();
    auto switchPro = create_switch();
    auto xOne = create_xbox_one();
    static std::vector gps {
      supported_gamepad_t { "auto", true, "" },
      supported_gamepad_t { "xone", static_cast<bool>(xOne), !xOne ? xOne.getErrorMessage() : "" },
      supported_gamepad_t { "ds5", static_cast<bool>(ds5), !ds5 ? ds5.getErrorMessage() : "" },
      supported_gamepad_t { "switch", static_cast<bool>(switchPro), !switchPro ? switchPro.getErrorMessage() : "" },
    };
    for (auto &[name, is_enabled, reason_disabled] : gps) {
      if (!is_enabled) {
        BOOST_LOG(warning) << "Gamepad " << name << " is disabled due to " << reason_disabled;
      }
    }
    return gps;
  }
}  // namespace platf::gamepad
| 12,252
|
C++
|
.cpp
| 278
| 37.366906
| 222
| 0.644119
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,007
|
inputtino.cpp
|
LizardByte_Sunshine/src/platform/linux/input/inputtino.cpp
|
/**
* @file src/platform/linux/input/inputtino.cpp
* @brief Definitions for the inputtino Linux input handling.
*/
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/config.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include "inputtino_common.h"
#include "inputtino_gamepad.h"
#include "inputtino_keyboard.h"
#include "inputtino_mouse.h"
#include "inputtino_pen.h"
#include "inputtino_touch.h"
using namespace std::literals;
namespace platf {
input_t
input() {
return { new input_raw_t() };
}
std::unique_ptr<client_input_t>
allocate_client_input_context(input_t &input) {
return std::make_unique<client_input_raw_t>(input);
}
void
freeInput(void *p) {
auto *input = (input_raw_t *) p;
delete input;
}
void
move_mouse(input_t &input, int deltaX, int deltaY) {
auto raw = (input_raw_t *) input.get();
platf::mouse::move(raw, deltaX, deltaY);
}
void
abs_mouse(input_t &input, const touch_port_t &touch_port, float x, float y) {
auto raw = (input_raw_t *) input.get();
platf::mouse::move_abs(raw, touch_port, x, y);
}
void
button_mouse(input_t &input, int button, bool release) {
auto raw = (input_raw_t *) input.get();
platf::mouse::button(raw, button, release);
}
void
scroll(input_t &input, int high_res_distance) {
auto raw = (input_raw_t *) input.get();
platf::mouse::scroll(raw, high_res_distance);
}
void
hscroll(input_t &input, int high_res_distance) {
auto raw = (input_raw_t *) input.get();
platf::mouse::hscroll(raw, high_res_distance);
}
void
keyboard_update(input_t &input, uint16_t modcode, bool release, uint8_t flags) {
auto raw = (input_raw_t *) input.get();
platf::keyboard::update(raw, modcode, release, flags);
}
void
unicode(input_t &input, char *utf8, int size) {
auto raw = (input_raw_t *) input.get();
platf::keyboard::unicode(raw, utf8, size);
}
void
touch_update(client_input_t *input, const touch_port_t &touch_port, const touch_input_t &touch) {
auto raw = (client_input_raw_t *) input;
platf::touch::update(raw, touch_port, touch);
}
void
pen_update(client_input_t *input, const touch_port_t &touch_port, const pen_input_t &pen) {
auto raw = (client_input_raw_t *) input;
platf::pen::update(raw, touch_port, pen);
}
int
alloc_gamepad(input_t &input, const gamepad_id_t &id, const gamepad_arrival_t &metadata, feedback_queue_t feedback_queue) {
auto raw = (input_raw_t *) input.get();
return platf::gamepad::alloc(raw, id, metadata, feedback_queue);
}
void
free_gamepad(input_t &input, int nr) {
auto raw = (input_raw_t *) input.get();
platf::gamepad::free(raw, nr);
}
void
gamepad_update(input_t &input, int nr, const gamepad_state_t &gamepad_state) {
auto raw = (input_raw_t *) input.get();
platf::gamepad::update(raw, nr, gamepad_state);
}
void
gamepad_touch(input_t &input, const gamepad_touch_t &touch) {
auto raw = (input_raw_t *) input.get();
platf::gamepad::touch(raw, touch);
}
void
gamepad_motion(input_t &input, const gamepad_motion_t &motion) {
auto raw = (input_raw_t *) input.get();
platf::gamepad::motion(raw, motion);
}
void
gamepad_battery(input_t &input, const gamepad_battery_t &battery) {
auto raw = (input_raw_t *) input.get();
platf::gamepad::battery(raw, battery);
}
platform_caps::caps_t
get_capabilities() {
platform_caps::caps_t caps = 0;
// TODO: if has_uinput
caps |= platform_caps::pen_touch;
// We support controller touchpad input only when emulating the PS5 controller
if (config::input.gamepad == "ds5"sv || config::input.gamepad == "auto"sv) {
caps |= platform_caps::controller_touch;
}
return caps;
}
util::point_t
get_mouse_loc(input_t &input) {
auto raw = (input_raw_t *) input.get();
return platf::mouse::get_location(raw);
}
std::vector<supported_gamepad_t> &
supported_gamepads(input_t *input) {
return platf::gamepad::supported_gamepads(input);
}
} // namespace platf
| 4,116
|
C++
|
.cpp
| 126
| 28.984127
| 125
| 0.671713
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,008
|
inputtino_touch.cpp
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_touch.cpp
|
/**
* @file src/platform/linux/input/inputtino_touch.cpp
* @brief Definitions for inputtino touch input handling.
*/
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/config.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include "inputtino_common.h"
#include "inputtino_touch.h"
using namespace std::literals;
namespace platf::touch {

  /**
   * @brief Forward a client touch event to the virtual touch screen.
   * @param raw The per-client input context.
   * @param touch_port Describes the streamed display area (unused here).
   * @param touch The touch event to forward.
   */
  void
  update(client_input_raw_t *raw, const touch_port_t &touch_port, const touch_input_t &touch) {
    if (!raw->touch) {
      return;
    }

    switch (touch.eventType) {
      case LI_TOUCH_EVENT_HOVER:
      case LI_TOUCH_EVENT_DOWN:
      case LI_TOUCH_EVENT_MOVE: {
        // The client reports rotation as 0..360 degrees; the virtual device
        // wants -90..90 degrees relative to the Y axis.
        int angle = touch.rotation;
        if (angle > 90 && angle < 270) {
          // Mirror values in the lower hemisphere back into range
          angle = 180 - angle;
        }
        if (angle > 90) {
          // Wrap anything still out of range
          angle -= 360;
        }
        else if (angle < -90) {
          angle += 360;
        }
        (*raw->touch).place_finger(touch.pointerId, touch.x, touch.y, touch.pressureOrDistance, angle);
        break;
      }
      case LI_TOUCH_EVENT_CANCEL:
      case LI_TOUCH_EVENT_UP:
      case LI_TOUCH_EVENT_HOVER_LEAVE: {
        (*raw->touch).release_finger(touch.pointerId);
        break;
      }
        // TODO: LI_TOUCH_EVENT_CANCEL_ALL
    }
  }
}  // namespace platf::touch
| 1,624
|
C++
|
.cpp
| 49
| 26.285714
| 114
| 0.616316
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,009
|
inputtino_pen.cpp
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_pen.cpp
|
/**
* @file src/platform/linux/input/inputtino_pen.cpp
* @brief Definitions for inputtino pen input handling.
*/
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/config.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include "inputtino_common.h"
#include "inputtino_pen.h"
using namespace std::literals;
namespace platf::pen {
  // Forward a client pen event to the virtual pen tablet: sets pen buttons,
  // selects the active tool, converts client rotation+tilt (polar) into the
  // X/Y tilt angles expected by the device, and finally places the tool.
  void
  update(client_input_raw_t *raw, const touch_port_t &touch_port, const pen_input_t &pen) {
    if (raw->pen) {
      // First set the buttons
      (*raw->pen).set_btn(inputtino::PenTablet::PRIMARY, pen.penButtons & LI_PEN_BUTTON_PRIMARY);
      (*raw->pen).set_btn(inputtino::PenTablet::SECONDARY, pen.penButtons & LI_PEN_BUTTON_SECONDARY);
      (*raw->pen).set_btn(inputtino::PenTablet::TERTIARY, pen.penButtons & LI_PEN_BUTTON_TERTIARY);
      // Set the tool
      inputtino::PenTablet::TOOL_TYPE tool;
      switch (pen.toolType) {
        case LI_TOOL_TYPE_PEN:
          tool = inputtino::PenTablet::PEN;
          break;
        case LI_TOOL_TYPE_ERASER:
          tool = inputtino::PenTablet::ERASER;
          break;
        default:
          // Unknown tool type: keep whatever tool was active previously.
          tool = inputtino::PenTablet::SAME_AS_BEFORE;
          break;
      }
      // Normalize rotation value to 0-359 degree range
      auto rotation = pen.rotation;
      if (rotation != LI_ROT_UNKNOWN) {
        rotation %= 360;
      }
      // Here we receive:
      // - Rotation: degrees from vertical in Y dimension (parallel to screen, 0..360)
      // - Tilt: degrees from vertical in Z dimension (perpendicular to screen, 0..90)
      float tilt_x = 0;
      float tilt_y = 0;
      // Convert polar coordinates into Y tilt angles
      if (pen.tilt != LI_TILT_UNKNOWN && rotation != LI_ROT_UNKNOWN) {
        auto rotation_rads = deg2rad(rotation);
        auto tilt_rads = deg2rad(pen.tilt);
        // Decompose the tilt vector: r is its projection onto the screen
        // plane, z its component perpendicular to the screen.
        auto r = std::sin(tilt_rads);
        auto z = std::cos(tilt_rads);
        tilt_x = std::atan2(std::sin(-rotation_rads) * r, z) * 180.f / M_PI;
        tilt_y = std::atan2(std::cos(-rotation_rads) * r, z) * 180.f / M_PI;
      }
      // DOWN/MOVE carry pressure; other events carry hover distance instead.
      bool is_touching = pen.eventType == LI_TOUCH_EVENT_DOWN || pen.eventType == LI_TOUCH_EVENT_MOVE;
      (*raw->pen).place_tool(tool,
        pen.x,
        pen.y,
        is_touching ? pen.pressureOrDistance : -1,
        is_touching ? -1 : pen.pressureOrDistance,
        tilt_x,
        tilt_y);
    }
  }
}  // namespace platf::pen
| 2,477
|
C++
|
.cpp
| 65
| 31.769231
| 102
| 0.630724
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,010
|
publish.cpp
|
LizardByte_Sunshine/src/platform/macos/publish.cpp
|
/**
* @file src/platform/macos/publish.cpp
* @brief Definitions for publishing services on macOS.
*/
#include <dns_sd.h>
#include <thread>
#include "src/logging.h"
#include "src/network.h"
#include "src/nvhttp.h"
#include "src/platform/common.h"
using namespace std::literals;
namespace platf::publish {
  namespace {
    /** @brief Custom deleter intended to be used for `std::unique_ptr<DNSServiceRef>`. */
    struct ServiceRefDeleter {
      typedef DNSServiceRef pointer; ///< Type of object to be deleted.
      void
      operator()(pointer serviceRef) {
        DNSServiceRefDeallocate(serviceRef);
        BOOST_LOG(info) << "Deregistered DNS service."sv;
      }
    };
    /** @brief This class encapsulates the polling and deinitialization of our connection with
     * the mDNS service. Implements the `::platf::deinit_t` interface.
     */
    class deinit_t: public ::platf::deinit_t, std::unique_ptr<DNSServiceRef, ServiceRefDeleter> {
    public:
      /** @brief Construct deinit_t object.
       *
       * Create a thread that will use `select(2)` to wait for a response from the mDNS service.
       * The thread will give up if an error is received or if `_stopRequested` becomes true.
       *
       * @param serviceRef An initialized reference to the mDNS service.
       */
      deinit_t(DNSServiceRef serviceRef):
          unique_ptr(serviceRef) {
        // The lambda captures _stopRequested by const reference so the thread
        // observes the flag set by the destructor.
        _thread = std::thread { [serviceRef, &_stopRequested = std::as_const(_stopRequested)]() {
          const auto socket = DNSServiceRefSockFD(serviceRef);
          while (!_stopRequested) {
            auto fdset = fd_set {};
            FD_ZERO(&fdset);
            FD_SET(socket, &fdset);
            auto timeout = timeval { .tv_sec = 3, .tv_usec = 0 }; // 3 second timeout
            const auto ready = select(socket + 1, &fdset, nullptr, nullptr, &timeout);
            if (ready == -1) {
              BOOST_LOG(error) << "Failed to obtain response from DNS service."sv;
              break;
            }
            else if (ready != 0) {
              // A single callback invocation is all we expect; stop polling after it.
              DNSServiceProcessResult(serviceRef);
              break;
            }
            // ready == 0: timed out, loop again and re-check _stopRequested.
          }
        } };
      }
      /** @brief Ensure that we gracefully finish polling the mDNS service before freeing our
       * connection to it.
       */
      ~deinit_t() override {
        _stopRequested = true;
        _thread.join();
      }
      deinit_t(const deinit_t &) = delete;
      deinit_t &
      operator=(const deinit_t &) = delete;
    private:
      std::thread _thread; ///< Thread for polling the mDNS service for a response.
      std::atomic<bool> _stopRequested = false; ///< Whether to stop polling the mDNS service.
    };
    /** @brief Callback that will be invoked when the mDNS service finishes registering our service.
     * @param errorCode Describes whether the registration was successful.
     */
    void
    registrationCallback(DNSServiceRef /*serviceRef*/, DNSServiceFlags /*flags*/,
      DNSServiceErrorType errorCode, const char * /*name*/,
      const char * /*regtype*/, const char * /*domain*/, void * /*context*/) {
      if (errorCode != kDNSServiceErr_NoError) {
        BOOST_LOG(error) << "Failed to register DNS service: Error "sv << errorCode;
        return;
      }
      BOOST_LOG(info) << "Successfully registered DNS service."sv;
    }
  } // anonymous namespace
  /**
   * @brief Main entry point for publication of our service on macOS.
   *
   * This function initiates a connection to the macOS mDNS service and requests to register
   * our Sunshine service. Registration will occur asynchronously (unless it fails immediately,
   * which is probably only possible if the host machine is misconfigured).
   *
   * @return Either `nullptr` (if the registration fails immediately) or a `unique_ptr<deinit_t>`,
   * which will manage polling for a response from the mDNS service, and then, when
   * deconstructed, will deregister the service.
   */
  [[nodiscard]] std::unique_ptr<::platf::deinit_t>
  start() {
    auto serviceRef = DNSServiceRef {};
    const auto status = DNSServiceRegister(
      &serviceRef,
      0, // flags
      0, // interfaceIndex
      nullptr, // name
      SERVICE_TYPE,
      nullptr, // domain
      nullptr, // host
      htons(net::map_port(nvhttp::PORT_HTTP)), // DNSServiceRegister expects the port in network byte order
      0, // txtLen
      nullptr, // txtRecord
      registrationCallback,
      nullptr // context
    );
    if (status != kDNSServiceErr_NoError) {
      BOOST_LOG(error) << "Failed immediately to register DNS service: Error "sv << status;
      return nullptr;
    }
    return std::make_unique<deinit_t>(serviceRef);
  }
} // namespace platf::publish
| 4,699
|
C++
|
.cpp
| 118
| 33.135593
| 100
| 0.63694
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,011
|
input.cpp
|
LizardByte_Sunshine/src/platform/macos/input.cpp
|
/**
* @file src/platform/macos/input.cpp
* @brief Definitions for macOS input handling.
*/
#include "src/input.h"
#import <Carbon/Carbon.h>
#include <chrono>
#include <mach/mach.h>
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include <ApplicationServices/ApplicationServices.h>
#include <CoreFoundation/CoreFoundation.h>
#include <iostream>
#include <thread>
/**
* @brief Delay for a double click, in milliseconds.
* @todo Make this configurable.
*/
constexpr std::chrono::milliseconds MULTICLICK_DELAY_MS(500);
namespace platf {
using namespace std::literals;
struct macos_input_t {
public:
CGDirectDisplayID display {};
CGFloat displayScaling {};
CGEventSourceRef source {};
// keyboard related stuff
CGEventRef kb_event {};
CGEventFlags kb_flags {};
// mouse related stuff
CGEventRef mouse_event {}; // mouse event source
bool mouse_down[3] {}; // mouse button status
std::chrono::steady_clock::steady_clock::time_point last_mouse_event[3][2]; // timestamp of last mouse events
};
// A struct to hold a Windows keycode to Mac virtual keycode mapping.
struct KeyCodeMap {
int win_keycode;
int mac_keycode;
};
  // Customized less operator for using std::lower_bound() on a KeyCodeMap array.
  // Entries are ordered by their Windows keycode only.
  bool
  operator<(const KeyCodeMap &a, const KeyCodeMap &b) {
    return a.win_keycode < b.win_keycode;
  }
  // clang-format off
  // Windows VKEY -> macOS virtual keycode table. Entries with -1 have no macOS
  // equivalent. NOTE: keysym() searches this array with std::lower_bound, so it
  // MUST stay sorted by win_keycode (duplicate keys like 0x15/0x19 map the
  // first matching entry).
  const KeyCodeMap kKeyCodesMap[] = {
    { 0x08 /* VKEY_BACK */, kVK_Delete },
    { 0x09 /* VKEY_TAB */, kVK_Tab },
    { 0x0A /* VKEY_BACKTAB */, 0x21E4 },
    { 0x0C /* VKEY_CLEAR */, kVK_ANSI_KeypadClear },
    { 0x0D /* VKEY_RETURN */, kVK_Return },
    { 0x10 /* VKEY_SHIFT */, kVK_Shift },
    { 0x11 /* VKEY_CONTROL */, kVK_Control },
    { 0x12 /* VKEY_MENU */, kVK_Option },
    { 0x13 /* VKEY_PAUSE */, -1 },
    { 0x14 /* VKEY_CAPITAL */, kVK_CapsLock },
    { 0x15 /* VKEY_KANA */, kVK_JIS_Kana },
    { 0x15 /* VKEY_HANGUL */, -1 },
    { 0x17 /* VKEY_JUNJA */, -1 },
    { 0x18 /* VKEY_FINAL */, -1 },
    { 0x19 /* VKEY_HANJA */, -1 },
    { 0x19 /* VKEY_KANJI */, -1 },
    { 0x1B /* VKEY_ESCAPE */, kVK_Escape },
    { 0x1C /* VKEY_CONVERT */, -1 },
    { 0x1D /* VKEY_NONCONVERT */, -1 },
    { 0x1E /* VKEY_ACCEPT */, -1 },
    { 0x1F /* VKEY_MODECHANGE */, -1 },
    { 0x20 /* VKEY_SPACE */, kVK_Space },
    { 0x21 /* VKEY_PRIOR */, kVK_PageUp },
    { 0x22 /* VKEY_NEXT */, kVK_PageDown },
    { 0x23 /* VKEY_END */, kVK_End },
    { 0x24 /* VKEY_HOME */, kVK_Home },
    { 0x25 /* VKEY_LEFT */, kVK_LeftArrow },
    { 0x26 /* VKEY_UP */, kVK_UpArrow },
    { 0x27 /* VKEY_RIGHT */, kVK_RightArrow },
    { 0x28 /* VKEY_DOWN */, kVK_DownArrow },
    { 0x29 /* VKEY_SELECT */, -1 },
    { 0x2A /* VKEY_PRINT */, -1 },
    { 0x2B /* VKEY_EXECUTE */, -1 },
    { 0x2C /* VKEY_SNAPSHOT */, -1 },
    { 0x2D /* VKEY_INSERT */, kVK_Help },
    { 0x2E /* VKEY_DELETE */, kVK_ForwardDelete },
    { 0x2F /* VKEY_HELP */, kVK_Help },
    { 0x30 /* VKEY_0 */, kVK_ANSI_0 },
    { 0x31 /* VKEY_1 */, kVK_ANSI_1 },
    { 0x32 /* VKEY_2 */, kVK_ANSI_2 },
    { 0x33 /* VKEY_3 */, kVK_ANSI_3 },
    { 0x34 /* VKEY_4 */, kVK_ANSI_4 },
    { 0x35 /* VKEY_5 */, kVK_ANSI_5 },
    { 0x36 /* VKEY_6 */, kVK_ANSI_6 },
    { 0x37 /* VKEY_7 */, kVK_ANSI_7 },
    { 0x38 /* VKEY_8 */, kVK_ANSI_8 },
    { 0x39 /* VKEY_9 */, kVK_ANSI_9 },
    { 0x41 /* VKEY_A */, kVK_ANSI_A },
    { 0x42 /* VKEY_B */, kVK_ANSI_B },
    { 0x43 /* VKEY_C */, kVK_ANSI_C },
    { 0x44 /* VKEY_D */, kVK_ANSI_D },
    { 0x45 /* VKEY_E */, kVK_ANSI_E },
    { 0x46 /* VKEY_F */, kVK_ANSI_F },
    { 0x47 /* VKEY_G */, kVK_ANSI_G },
    { 0x48 /* VKEY_H */, kVK_ANSI_H },
    { 0x49 /* VKEY_I */, kVK_ANSI_I },
    { 0x4A /* VKEY_J */, kVK_ANSI_J },
    { 0x4B /* VKEY_K */, kVK_ANSI_K },
    { 0x4C /* VKEY_L */, kVK_ANSI_L },
    { 0x4D /* VKEY_M */, kVK_ANSI_M },
    { 0x4E /* VKEY_N */, kVK_ANSI_N },
    { 0x4F /* VKEY_O */, kVK_ANSI_O },
    { 0x50 /* VKEY_P */, kVK_ANSI_P },
    { 0x51 /* VKEY_Q */, kVK_ANSI_Q },
    { 0x52 /* VKEY_R */, kVK_ANSI_R },
    { 0x53 /* VKEY_S */, kVK_ANSI_S },
    { 0x54 /* VKEY_T */, kVK_ANSI_T },
    { 0x55 /* VKEY_U */, kVK_ANSI_U },
    { 0x56 /* VKEY_V */, kVK_ANSI_V },
    { 0x57 /* VKEY_W */, kVK_ANSI_W },
    { 0x58 /* VKEY_X */, kVK_ANSI_X },
    { 0x59 /* VKEY_Y */, kVK_ANSI_Y },
    { 0x5A /* VKEY_Z */, kVK_ANSI_Z },
    { 0x5B /* VKEY_LWIN */, kVK_Command },
    { 0x5C /* VKEY_RWIN */, kVK_RightCommand },
    { 0x5D /* VKEY_APPS */, kVK_RightCommand },
    { 0x5F /* VKEY_SLEEP */, -1 },
    { 0x60 /* VKEY_NUMPAD0 */, kVK_ANSI_Keypad0 },
    { 0x61 /* VKEY_NUMPAD1 */, kVK_ANSI_Keypad1 },
    { 0x62 /* VKEY_NUMPAD2 */, kVK_ANSI_Keypad2 },
    { 0x63 /* VKEY_NUMPAD3 */, kVK_ANSI_Keypad3 },
    { 0x64 /* VKEY_NUMPAD4 */, kVK_ANSI_Keypad4 },
    { 0x65 /* VKEY_NUMPAD5 */, kVK_ANSI_Keypad5 },
    { 0x66 /* VKEY_NUMPAD6 */, kVK_ANSI_Keypad6 },
    { 0x67 /* VKEY_NUMPAD7 */, kVK_ANSI_Keypad7 },
    { 0x68 /* VKEY_NUMPAD8 */, kVK_ANSI_Keypad8 },
    { 0x69 /* VKEY_NUMPAD9 */, kVK_ANSI_Keypad9 },
    { 0x6A /* VKEY_MULTIPLY */, kVK_ANSI_KeypadMultiply },
    { 0x6B /* VKEY_ADD */, kVK_ANSI_KeypadPlus },
    { 0x6C /* VKEY_SEPARATOR */, -1 },
    { 0x6D /* VKEY_SUBTRACT */, kVK_ANSI_KeypadMinus },
    { 0x6E /* VKEY_DECIMAL */, kVK_ANSI_KeypadDecimal },
    { 0x6F /* VKEY_DIVIDE */, kVK_ANSI_KeypadDivide },
    { 0x70 /* VKEY_F1 */, kVK_F1 },
    { 0x71 /* VKEY_F2 */, kVK_F2 },
    { 0x72 /* VKEY_F3 */, kVK_F3 },
    { 0x73 /* VKEY_F4 */, kVK_F4 },
    { 0x74 /* VKEY_F5 */, kVK_F5 },
    { 0x75 /* VKEY_F6 */, kVK_F6 },
    { 0x76 /* VKEY_F7 */, kVK_F7 },
    { 0x77 /* VKEY_F8 */, kVK_F8 },
    { 0x78 /* VKEY_F9 */, kVK_F9 },
    { 0x79 /* VKEY_F10 */, kVK_F10 },
    { 0x7A /* VKEY_F11 */, kVK_F11 },
    { 0x7B /* VKEY_F12 */, kVK_F12 },
    { 0x7C /* VKEY_F13 */, kVK_F13 },
    { 0x7D /* VKEY_F14 */, kVK_F14 },
    { 0x7E /* VKEY_F15 */, kVK_F15 },
    { 0x7F /* VKEY_F16 */, kVK_F16 },
    { 0x80 /* VKEY_F17 */, kVK_F17 },
    { 0x81 /* VKEY_F18 */, kVK_F18 },
    { 0x82 /* VKEY_F19 */, kVK_F19 },
    { 0x83 /* VKEY_F20 */, kVK_F20 },
    { 0x84 /* VKEY_F21 */, -1 },
    { 0x85 /* VKEY_F22 */, -1 },
    { 0x86 /* VKEY_F23 */, -1 },
    { 0x87 /* VKEY_F24 */, -1 },
    { 0x90 /* VKEY_NUMLOCK */, -1 },
    { 0x91 /* VKEY_SCROLL */, -1 },
    { 0xA0 /* VKEY_LSHIFT */, kVK_Shift },
    { 0xA1 /* VKEY_RSHIFT */, kVK_RightShift },
    { 0xA2 /* VKEY_LCONTROL */, kVK_Control },
    { 0xA3 /* VKEY_RCONTROL */, kVK_RightControl },
    { 0xA4 /* VKEY_LMENU */, kVK_Option },
    { 0xA5 /* VKEY_RMENU */, kVK_RightOption },
    { 0xA6 /* VKEY_BROWSER_BACK */, -1 },
    { 0xA7 /* VKEY_BROWSER_FORWARD */, -1 },
    { 0xA8 /* VKEY_BROWSER_REFRESH */, -1 },
    { 0xA9 /* VKEY_BROWSER_STOP */, -1 },
    { 0xAA /* VKEY_BROWSER_SEARCH */, -1 },
    { 0xAB /* VKEY_BROWSER_FAVORITES */, -1 },
    { 0xAC /* VKEY_BROWSER_HOME */, -1 },
    { 0xAD /* VKEY_VOLUME_MUTE */, -1 },
    { 0xAE /* VKEY_VOLUME_DOWN */, -1 },
    { 0xAF /* VKEY_VOLUME_UP */, -1 },
    { 0xB0 /* VKEY_MEDIA_NEXT_TRACK */, -1 },
    { 0xB1 /* VKEY_MEDIA_PREV_TRACK */, -1 },
    { 0xB2 /* VKEY_MEDIA_STOP */, -1 },
    { 0xB3 /* VKEY_MEDIA_PLAY_PAUSE */, -1 },
    { 0xB4 /* VKEY_MEDIA_LAUNCH_MAIL */, -1 },
    { 0xB5 /* VKEY_MEDIA_LAUNCH_MEDIA_SELECT */, -1 },
    { 0xB6 /* VKEY_MEDIA_LAUNCH_APP1 */, -1 },
    { 0xB7 /* VKEY_MEDIA_LAUNCH_APP2 */, -1 },
    { 0xBA /* VKEY_OEM_1 */, kVK_ANSI_Semicolon },
    { 0xBB /* VKEY_OEM_PLUS */, kVK_ANSI_Equal },
    { 0xBC /* VKEY_OEM_COMMA */, kVK_ANSI_Comma },
    { 0xBD /* VKEY_OEM_MINUS */, kVK_ANSI_Minus },
    { 0xBE /* VKEY_OEM_PERIOD */, kVK_ANSI_Period },
    { 0xBF /* VKEY_OEM_2 */, kVK_ANSI_Slash },
    { 0xC0 /* VKEY_OEM_3 */, kVK_ANSI_Grave },
    { 0xDB /* VKEY_OEM_4 */, kVK_ANSI_LeftBracket },
    { 0xDC /* VKEY_OEM_5 */, kVK_ANSI_Backslash },
    { 0xDD /* VKEY_OEM_6 */, kVK_ANSI_RightBracket },
    { 0xDE /* VKEY_OEM_7 */, kVK_ANSI_Quote },
    { 0xDF /* VKEY_OEM_8 */, -1 },
    { 0xE2 /* VKEY_OEM_102 */, -1 },
    { 0xE5 /* VKEY_PROCESSKEY */, -1 },
    { 0xE7 /* VKEY_PACKET */, -1 },
    { 0xF6 /* VKEY_ATTN */, -1 },
    { 0xF7 /* VKEY_CRSEL */, -1 },
    { 0xF8 /* VKEY_EXSEL */, -1 },
    { 0xF9 /* VKEY_EREOF */, -1 },
    { 0xFA /* VKEY_PLAY */, -1 },
    { 0xFB /* VKEY_ZOOM */, -1 },
    { 0xFC /* VKEY_NONAME */, -1 },
    { 0xFD /* VKEY_PA1 */, -1 },
    { 0xFE /* VKEY_OEM_CLEAR */, kVK_ANSI_KeypadClear }
  };
  // clang-format on
int
keysym(int keycode) {
KeyCodeMap key_map {};
key_map.win_keycode = keycode;
const KeyCodeMap *temp_map = std::lower_bound(
kKeyCodesMap, kKeyCodesMap + sizeof(kKeyCodesMap) / sizeof(kKeyCodesMap[0]), key_map);
if (temp_map >= kKeyCodesMap + sizeof(kKeyCodesMap) / sizeof(kKeyCodesMap[0]) ||
temp_map->win_keycode != keycode || temp_map->mac_keycode == -1) {
return -1;
}
return temp_map->mac_keycode;
}
  // Inject a key press/release. Modifier keys are translated into a
  // flags-changed event (tracking the accumulated modifier state in
  // kb_flags); every other key becomes a plain key-down/key-up event.
  void
  keyboard_update(input_t &input, uint16_t modcode, bool release, uint8_t flags) {
    auto key = keysym(modcode);
    BOOST_LOG(debug) << "got keycode: 0x"sv << std::hex << modcode << ", translated to: 0x" << std::hex << key << ", release:" << release;
    if (key < 0) {
      // No macOS equivalent for this VKEY; drop the event.
      return;
    }
    auto macos_input = ((macos_input_t *) input.get());
    auto event = macos_input->kb_event;
    if (key == kVK_Shift || key == kVK_RightShift ||
        key == kVK_Command || key == kVK_RightCommand ||
        key == kVK_Option || key == kVK_RightOption ||
        key == kVK_Control || key == kVK_RightControl) {
      // `mask` is always assigned: the enclosing `if` restricts `key` to
      // exactly the values covered by the switch cases below.
      CGEventFlags mask;
      switch (key) {
        case kVK_Shift:
        case kVK_RightShift:
          mask = kCGEventFlagMaskShift;
          break;
        case kVK_Command:
        case kVK_RightCommand:
          mask = kCGEventFlagMaskCommand;
          break;
        case kVK_Option:
        case kVK_RightOption:
          mask = kCGEventFlagMaskAlternate;
          break;
        case kVK_Control:
        case kVK_RightControl:
          mask = kCGEventFlagMaskControl;
          break;
      }
      // Set or clear the modifier bit depending on press/release.
      macos_input->kb_flags = release ? macos_input->kb_flags & ~mask : macos_input->kb_flags | mask;
      CGEventSetType(event, kCGEventFlagsChanged);
      CGEventSetFlags(event, macos_input->kb_flags);
    }
    else {
      CGEventSetIntegerValueField(event, kCGKeyboardEventKeycode, key);
      CGEventSetType(event, release ? kCGEventKeyUp : kCGEventKeyDown);
    }
    CGEventPost(kCGHIDEventTap, event);
  }
// Text (unicode) input is not supported on macOS yet; the input is logged and dropped.
void
unicode(input_t &input, char *utf8, int size) {
BOOST_LOG(info) << "unicode: Unicode input not yet implemented for MacOS."sv;
}
// Gamepad allocation is not implemented on macOS; always reports failure.
int
alloc_gamepad(input_t &input, const gamepad_id_t &id, const gamepad_arrival_t &metadata, feedback_queue_t feedback_queue) {
BOOST_LOG(info) << "alloc_gamepad: Gamepad not yet implemented for MacOS."sv;
// -1 tells the caller no gamepad slot could be allocated.
return -1;
}
// No-op: gamepads are never allocated on macOS (see alloc_gamepad()).
void
free_gamepad(input_t &input, int nr) {
BOOST_LOG(info) << "free_gamepad: Gamepad not yet implemented for MacOS."sv;
}
// No-op: gamepad state updates are not implemented on macOS.
void
gamepad_update(input_t &input, int nr, const gamepad_state_t &gamepad_state) {
BOOST_LOG(info) << "gamepad: Gamepad not yet implemented for MacOS."sv;
}
/**
 * @brief Query the OS for the current mouse cursor position.
 * @param input The global input context.
 * @return The cursor position in global display coordinates.
 */
util::point_t
get_mouse_loc(input_t &input) {
  const auto macos_input = static_cast<macos_input_t *>(input.get());
  // A fresh event is created per query so no state can leak between calls.
  const auto probe = CGEventCreate(macos_input->source);
  const auto pos = CGEventGetLocation(probe);
  CFRelease(probe);
  return util::point_t { pos.x, pos.y };
}
/**
 * @brief Post a mouse event to the system at the given location.
 * @param input The global input context.
 * @param button The button the event refers to.
 * @param type The CoreGraphics event type to post.
 * @param raw_location Desired cursor position (global display coordinates).
 * @param previous_location Prior cursor position, used to derive movement deltas.
 * @param click_count Click count for the event (1 = single, 2 = double click, 0 = move only).
 */
void
post_mouse(
input_t &input,
const CGMouseButton button,
const CGEventType type,
const util::point_t raw_location,
const util::point_t previous_location,
const int click_count) {
BOOST_LOG(debug) << "mouse_event: "sv << button << ", type: "sv << type << ", location:"sv << raw_location.x << ":"sv << raw_location.y << " click_count: "sv << click_count;
const auto macos_input = static_cast<macos_input_t *>(input.get());
const auto display = macos_input->display;
const auto event = macos_input->mouse_event;
// get display bounds for current display
const CGRect display_bounds = CGDisplayBounds(display);
// limit mouse to current display bounds
const auto location = CGPoint {
std::clamp(raw_location.x, display_bounds.origin.x, display_bounds.origin.x + display_bounds.size.width - 1),
std::clamp(raw_location.y, display_bounds.origin.y, display_bounds.origin.y + display_bounds.size.height - 1)
};
CGEventSetType(event, type);
CGEventSetLocation(event, location);
CGEventSetIntegerValueField(event, kCGMouseEventButtonNumber, button);
CGEventSetIntegerValueField(event, kCGMouseEventClickState, click_count);
// Include deltas so some 3D applications can consume changes (game cameras, etc)
const double deltaX = raw_location.x - previous_location.x;
const double deltaY = raw_location.y - previous_location.y;
CGEventSetDoubleValueField(event, kCGMouseEventDeltaX, deltaX);
CGEventSetDoubleValueField(event, kCGMouseEventDeltaY, deltaY);
CGEventPost(kCGHIDEventTap, event);
// For why this is here, see:
// https://stackoverflow.com/questions/15194409/simulated-mouseevent-not-working-properly-osx
CGWarpMouseCursorPosition(location);
}
/**
 * @brief Choose the mouse-movement event type based on which button is held.
 *
 * mouse_down[] is indexed by CGMouseButton (kCGMouseButtonLeft == 0,
 * kCGMouseButtonRight == 1, kCGMouseButtonCenter == 2), as written by
 * button_mouse(). The previous code had indexes 1 and 2 swapped, reporting
 * an "other" (middle) drag while the right button was held and vice versa.
 * @param input The global input context.
 * @return The drag event type for the first held button, or kCGEventMouseMoved.
 */
inline CGEventType
event_type_mouse(input_t &input) {
  const auto macos_input = static_cast<macos_input_t *>(input.get());
  if (macos_input->mouse_down[kCGMouseButtonLeft]) {
    return kCGEventLeftMouseDragged;
  }
  if (macos_input->mouse_down[kCGMouseButtonRight]) {
    return kCGEventRightMouseDragged;
  }
  if (macos_input->mouse_down[kCGMouseButtonCenter]) {
    return kCGEventOtherMouseDragged;
  }
  return kCGEventMouseMoved;
}
/**
 * @brief Move the mouse cursor by a relative offset.
 * @param input The global input context.
 * @param deltaX Horizontal offset in pixels.
 * @param deltaY Vertical offset in pixels.
 */
void
move_mouse(
  input_t &input,
  const int deltaX,
  const int deltaY) {
  const auto previous = get_mouse_loc(input);
  const util::point_t target { previous.x + deltaX, previous.y + deltaY };
  post_mouse(input, kCGMouseButtonLeft, event_type_mouse(input), target, previous, 0);
}
/**
 * @brief Move the mouse cursor to an absolute position on the captured display.
 * @param input The global input context.
 * @param touch_port The client viewport (unused on this platform).
 * @param x Horizontal position in client coordinates.
 * @param y Vertical position in client coordinates.
 */
void
abs_mouse(
  input_t &input,
  const touch_port_t &touch_port,
  const float x,
  const float y) {
  const auto macos_input = static_cast<macos_input_t *>(input.get());
  // Scale client coordinates into the display's virtual coordinate space.
  auto target = util::point_t {
    x * macos_input->displayScaling,
    y * macos_input->displayScaling
  };
  // Offset by the display origin so the position is correct for the captured display.
  const CGRect bounds = CGDisplayBounds(macos_input->display);
  target.x += bounds.origin.x;
  target.y += bounds.origin.y;
  post_mouse(input, kCGMouseButtonLeft, event_type_mouse(input), target, get_mouse_loc(input), 0);
}
/**
 * @brief Press or release a mouse button, detecting double clicks.
 * @param input The global input context.
 * @param button Client button number (1 = left, 2 = middle, 3 = right).
 * @param release true for button-up, false for button-down.
 */
void
button_mouse(input_t &input, const int button, const bool release) {
  const auto macos_input = static_cast<macos_input_t *>(input.get());
  CGMouseButton mac_button;
  CGEventType event;
  switch (button) {
    case 1:
      mac_button = kCGMouseButtonLeft;
      event = release ? kCGEventLeftMouseUp : kCGEventLeftMouseDown;
      break;
    case 2:
      mac_button = kCGMouseButtonCenter;
      event = release ? kCGEventOtherMouseUp : kCGEventOtherMouseDown;
      break;
    case 3:
      mac_button = kCGMouseButtonRight;
      event = release ? kCGEventRightMouseUp : kCGEventRightMouseDown;
      break;
    default:
      BOOST_LOG(warning) << "Unsupported mouse button for MacOS: "sv << button;
      return;
  }
  macos_input->mouse_down[mac_button] = !release;
  // A repeat of the same transition within MULTICLICK_DELAY_MS counts as a double click.
  const auto now = std::chrono::steady_clock::now();
  const auto position = get_mouse_loc(input);
  const bool multi_click = now < macos_input->last_mouse_event[mac_button][release] + MULTICLICK_DELAY_MS;
  post_mouse(input, mac_button, event, position, position, multi_click ? 2 : 1);
  macos_input->last_mouse_event[mac_button][release] = now;
}
/**
 * @brief Post a vertical scroll event.
 * @param input The global input context (unused; the event is posted directly).
 * @param high_res_distance High-resolution scroll distance from the client.
 */
void
scroll(input_t &input, const int high_res_distance) {
  // Wheel axis 1 carries the direction (one line up or down); axis 2 carries
  // the high-resolution distance.
  const auto direction = high_res_distance > 0 ? 1 : -1;
  CGEventRef event = CGEventCreateScrollWheelEvent(
    nullptr,
    kCGScrollEventUnitLine,
    2, direction, high_res_distance);
  CGEventPost(kCGHIDEventTap, event);
  CFRelease(event);
}
// Horizontal scrolling is not implemented on macOS; the event is dropped silently.
void
hscroll(input_t &input, int high_res_distance) {
// Unimplemented
}
/**
 * @brief Allocates a context to store per-client input data.
 * @param input The global input context.
 * @return A unique pointer to a per-client input data context.
 */
std::unique_ptr<client_input_t>
allocate_client_input_context(input_t &input) {
// Unused
// macOS keeps no per-client input state, so no context object is needed.
return nullptr;
}
/**
 * @brief Sends a touch event to the OS.
 * @param input The client-specific input context.
 * @param touch_port The current viewport for translating to screen coordinates.
 * @param touch The touch event.
 */
void
touch_update(client_input_t *input, const touch_port_t &touch_port, const touch_input_t &touch) {
// Unimplemented feature - platform_caps::pen_touch
// get_capabilities() does not advertise pen_touch, so clients should not send this.
}
/**
 * @brief Sends a pen event to the OS.
 * @param input The client-specific input context.
 * @param touch_port The current viewport for translating to screen coordinates.
 * @param pen The pen event.
 */
void
pen_update(client_input_t *input, const touch_port_t &touch_port, const pen_input_t &pen) {
// Unimplemented feature - platform_caps::pen_touch
// get_capabilities() does not advertise pen_touch, so clients should not send this.
}
/**
 * @brief Sends a gamepad touch event to the OS.
 * @param input The global input context.
 * @param touch The touch event.
 */
void
gamepad_touch(input_t &input, const gamepad_touch_t &touch) {
// Unimplemented feature - platform_caps::controller_touch
// get_capabilities() does not advertise controller_touch, so clients should not send this.
}
/**
 * @brief Sends a gamepad motion event to the OS.
 * @param input The global input context.
 * @param motion The motion event.
 */
void
gamepad_motion(input_t &input, const gamepad_motion_t &motion) {
// Unimplemented
// Gamepads are not supported on macOS (see alloc_gamepad()), so this is a no-op.
}
/**
 * @brief Sends a gamepad battery event to the OS.
 * @param input The global input context.
 * @param battery The battery event.
 */
void
gamepad_battery(input_t &input, const gamepad_battery_t &battery) {
// Unimplemented
// Gamepads are not supported on macOS (see alloc_gamepad()), so this is a no-op.
}
/**
 * @brief Create and initialize the global macOS input context.
 *
 * Selects the capture display (config::video.output_name, falling back to the
 * main display), computes the virtual-to-physical scaling factor, and creates
 * the reusable CoreGraphics event objects.
 * @return An input_t owning a fully initialized macos_input_t.
 */
input_t
input() {
  input_t result { new macos_input_t() };
  const auto macos_input = static_cast<macos_input_t *>(result.get());

  // Default to main display
  macos_input->display = CGMainDisplayID();

  auto output_name = config::video.output_name;
  // If output_name is set, try to find the display with that display id
  if (!output_name.empty()) {
    // constexpr bound avoids a variable-length array (non-standard in C++)
    constexpr uint32_t max_display = 32;
    uint32_t display_count = 0;
    CGDirectDisplayID displays[max_display];
    const auto err = CGGetActiveDisplayList(max_display, displays, &display_count);
    if (err != kCGErrorSuccess) {
      // Previously the error value was missing from this message entirely
      BOOST_LOG(error) << "Unable to get active display list , error: "sv << err;
    }
    else {
      // Parse the requested display id once instead of on every loop iteration
      const auto requested_id = static_cast<CGDirectDisplayID>(std::atoi(output_name.c_str()));
      for (uint32_t i = 0; i < display_count; i++) {
        if (displays[i] == requested_id) {
          macos_input->display = displays[i];
          break;
        }
      }
    }
  }

  // Input coordinates are based on the virtual resolution not the physical, so we need the scaling factor
  const CGDisplayModeRef mode = CGDisplayCopyDisplayMode(macos_input->display);
  macos_input->displayScaling = ((CGFloat) CGDisplayPixelsWide(macos_input->display)) / ((CGFloat) CGDisplayModeGetPixelWidth(mode));
  CFRelease(mode);

  // Reusable event objects for keyboard and mouse, all tied to one HID event source
  macos_input->source = CGEventSourceCreate(kCGEventSourceStateHIDSystemState);
  macos_input->kb_event = CGEventCreate(macos_input->source);
  macos_input->kb_flags = 0;
  macos_input->mouse_event = CGEventCreate(macos_input->source);
  macos_input->mouse_down[0] = false;
  macos_input->mouse_down[1] = false;
  macos_input->mouse_down[2] = false;

  BOOST_LOG(debug) << "Display "sv << macos_input->display << ", pixel dimension: " << CGDisplayPixelsWide(macos_input->display) << "x"sv << CGDisplayPixelsHigh(macos_input->display);

  return result;
}
/**
 * @brief Release all CoreGraphics objects owned by the input context.
 * @param p Pointer to a macos_input_t previously created by input().
 */
void
freeInput(void *p) {
  const auto *macos_input = static_cast<macos_input_t *>(p);
  CFRelease(macos_input->mouse_event);
  CFRelease(macos_input->kb_event);
  CFRelease(macos_input->source);
  delete macos_input;
}
// Advertise that no gamepad type is supported; the localization key tells the UI why.
std::vector<supported_gamepad_t> &
supported_gamepads(input_t *input) {
static std::vector gamepads {
supported_gamepad_t { "", false, "gamepads.macos_not_implemented" }
};
return gamepads;
}
/**
 * @brief Returns the supported platform capabilities to advertise to the client.
 * @return Capability flags.
 */
platform_caps::caps_t
get_capabilities() {
// No optional capabilities (pen/touch, controller touch, ...) are implemented on macOS.
return 0;
}
} // namespace platf
| 26,381
|
C++
|
.cpp
| 538
| 44.626394
| 185
| 0.49123
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,012
|
nv12_zero_device.cpp
|
LizardByte_Sunshine/src/platform/macos/nv12_zero_device.cpp
|
/**
* @file src/platform/macos/nv12_zero_device.cpp
* @brief Definitions for NV12 zero copy device on macOS.
*/
#include <utility>
#include "src/platform/macos/av_img_t.h"
#include "src/platform/macos/nv12_zero_device.h"
#include "src/video.h"
extern "C" {
#include "libavutil/imgutils.h"
}
namespace platf {
// Deleter for AVFrame pointers: releases the frame and any buffers it references.
void
free_frame(AVFrame *frame) {
av_frame_free(&frame);
}
// AVBufferRef free callback: drops the CVPixelBuffer reference retained in convert().
void
free_buffer(void *opaque, uint8_t *data) {
CVPixelBufferRelease((CVPixelBufferRef) data);
}
util::safe_ptr<AVFrame, free_frame> av_frame;
/**
 * @brief Attach the captured CVPixelBuffer to the output frame without copying pixel data.
 * @param img The captured image; must actually be an av_img_t.
 * @return 0 on success.
 */
int
nv12_zero_device::convert(platf::img_t &img) {
auto *av_img = (av_img_t *) &img;
// Release any existing CVPixelBuffer previously retained for encoding
av_buffer_unref(&av_frame->buf[0]);
// Attach an AVBufferRef to this frame which will retain ownership of the CVPixelBuffer
// until av_buffer_unref() is called (above) or the frame is freed with av_frame_free().
//
// The presence of the AVBufferRef allows FFmpeg to simply add a reference to the buffer
// rather than having to perform a deep copy of the data buffers in avcodec_send_frame().
av_frame->buf[0] = av_buffer_create((uint8_t *) CFRetain(av_img->pixel_buffer->buf), 0, free_buffer, nullptr, 0);
// Place a CVPixelBufferRef at data[3] as required by AV_PIX_FMT_VIDEOTOOLBOX
av_frame->data[3] = (uint8_t *) av_img->pixel_buffer->buf;
return 0;
}
/**
 * @brief Adopt a new AVFrame for subsequent convert() calls.
 * @param frame The frame to take ownership of (freed later via av_frame_free()).
 * @param hw_frames_ctx Unused by this device.
 * @return 0 on success.
 */
int
nv12_zero_device::set_frame(AVFrame *frame, AVBufferRef *hw_frames_ctx) {
this->frame = frame;
// av_frame takes ownership; any previously held frame is freed by reset().
av_frame.reset(frame);
// Propagate the frame dimensions so the capture resolution matches the encoder.
resolution_fn(this->display, frame->width, frame->height);
return 0;
}
/**
 * @brief Initialize the zero-copy device.
 * @param display Opaque display handle forwarded to the callbacks.
 * @param pix_fmt Requested encoder pixel format (nv12 for 8-bit, otherwise 10-bit).
 * @param resolution_fn Callback invoked when the frame resolution is known.
 * @param pixel_format_fn Callback informing the capture side of the CoreVideo format.
 * @return 0 on success.
 */
int
nv12_zero_device::init(void *display, pix_fmt_e pix_fmt, resolution_fn_t resolution_fn, const pixel_format_fn_t &pixel_format_fn) {
  // Translate the encoder format into the matching CoreVideo pixel format.
  const auto cv_format = (pix_fmt == pix_fmt_e::nv12) ?
                           kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange :
                           kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
  pixel_format_fn(display, cv_format);
  this->display = display;
  this->resolution_fn = std::move(resolution_fn);
  // data is never dereferenced here, but the platform independent code
  // checks it for null to decide whether the device is usable.
  data = this;
  return 0;
}
} // namespace platf
| 2,247
|
C++
|
.cpp
| 56
| 35.232143
| 133
| 0.691244
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,013
|
publish.cpp
|
LizardByte_Sunshine/src/platform/windows/publish.cpp
|
/**
* @file src/platform/windows/publish.cpp
* @brief Definitions for Windows mDNS service registration.
*/
#include <winsock2.h>
#include <windows.h>
#include <windns.h>
#include <winerror.h>
#include "misc.h"
#include "src/config.h"
#include "src/logging.h"
#include "src/network.h"
#include "src/nvhttp.h"
#include "src/platform/common.h"
#include "src/thread_safe.h"
#define _FN(x, ret, args) \
typedef ret(*x##_fn) args; \
static x##_fn x
using namespace std::literals;
#define __SV(quote) L##quote##sv
#define SV(quote) __SV(quote)
extern "C" {
#ifndef __MINGW32__
constexpr auto DNS_REQUEST_PENDING = 9506L;
constexpr auto DNS_QUERY_REQUEST_VERSION1 = 0x1;
constexpr auto DNS_QUERY_RESULTS_VERSION1 = 0x1;
#endif
#define SERVICE_DOMAIN "local"
constexpr auto SERVICE_TYPE_DOMAIN = SV(SERVICE_TYPE "." SERVICE_DOMAIN);
#ifndef __MINGW32__
typedef struct _DNS_SERVICE_INSTANCE {
LPWSTR pszInstanceName;
LPWSTR pszHostName;
IP4_ADDRESS *ip4Address;
IP6_ADDRESS *ip6Address;
WORD wPort;
WORD wPriority;
WORD wWeight;
// Property list
DWORD dwPropertyCount;
PWSTR *keys;
PWSTR *values;
DWORD dwInterfaceIndex;
} DNS_SERVICE_INSTANCE, *PDNS_SERVICE_INSTANCE;
#endif
typedef VOID WINAPI
DNS_SERVICE_REGISTER_COMPLETE(
_In_ DWORD Status,
_In_ PVOID pQueryContext,
_In_ PDNS_SERVICE_INSTANCE pInstance);
typedef DNS_SERVICE_REGISTER_COMPLETE *PDNS_SERVICE_REGISTER_COMPLETE;
#ifndef __MINGW32__
typedef struct _DNS_SERVICE_CANCEL {
PVOID reserved;
} DNS_SERVICE_CANCEL, *PDNS_SERVICE_CANCEL;
typedef struct _DNS_SERVICE_REGISTER_REQUEST {
ULONG Version;
ULONG InterfaceIndex;
PDNS_SERVICE_INSTANCE pServiceInstance;
PDNS_SERVICE_REGISTER_COMPLETE pRegisterCompletionCallback;
PVOID pQueryContext;
HANDLE hCredentials;
BOOL unicastEnabled;
} DNS_SERVICE_REGISTER_REQUEST, *PDNS_SERVICE_REGISTER_REQUEST;
#endif
_FN(_DnsServiceFreeInstance, VOID, (_In_ PDNS_SERVICE_INSTANCE pInstance));
_FN(_DnsServiceDeRegister, DWORD, (_In_ PDNS_SERVICE_REGISTER_REQUEST pRequest, _Inout_opt_ PDNS_SERVICE_CANCEL pCancel));
_FN(_DnsServiceRegister, DWORD, (_In_ PDNS_SERVICE_REGISTER_REQUEST pRequest, _Inout_opt_ PDNS_SERVICE_CANCEL pCancel));
} /* extern "C" */
namespace platf::publish {
// Completion callback for DnsServiceRegister/DnsServiceDeRegister.
// Rings the caller's alarm with the resulting service instance
// (pInstance may be nullptr when the operation failed).
VOID WINAPI
register_cb(DWORD status, PVOID pQueryContext, PDNS_SERVICE_INSTANCE pInstance) {
auto alarm = (safe::alarm_t<PDNS_SERVICE_INSTANCE>::element_type *) pQueryContext;
if (status) {
print_status("register_cb()"sv, status);
}
alarm->ring(pInstance);
}
/**
 * @brief Register or deregister the Sunshine mDNS service.
 * @param enable true to register, false to deregister.
 * @param existing_instance In/out: set to the registered instance on success;
 *        freed and cleared after a successful deregistration.
 * @return 0 on success, -1 on failure.
 */
static int
service(bool enable, PDNS_SERVICE_INSTANCE &existing_instance) {
// Blocks this thread until register_cb() fires with the asynchronous result.
auto alarm = safe::make_alarm<PDNS_SERVICE_INSTANCE>();
std::wstring domain { SERVICE_TYPE_DOMAIN.data(), SERVICE_TYPE_DOMAIN.size() };
auto hostname = platf::get_host_name();
auto name = from_utf8(net::mdns_instance_name(hostname) + '.') + domain;
auto host = from_utf8(hostname + ".local");
DNS_SERVICE_INSTANCE instance {};
instance.pszInstanceName = name.data();
instance.wPort = net::map_port(nvhttp::PORT_HTTP);
instance.pszHostName = host.data();
// Setting these values ensures Windows mDNS answers comply with RFC 1035.
// If these are unset, Windows will send a TXT record that has zero strings,
// which is illegal. Setting them to a single empty value causes Windows to
// send a single empty string for the TXT record, which is the correct thing
// to do when advertising a service without any TXT strings.
//
// Most clients aren't strictly checking TXT record compliance with RFC 1035,
// but Apple's mDNS resolver does and rejects the entire answer if an invalid
// TXT record is present.
PWCHAR keys[] = { nullptr };
PWCHAR values[] = { nullptr };
instance.dwPropertyCount = 1;
instance.keys = keys;
instance.values = values;
DNS_SERVICE_REGISTER_REQUEST req {};
req.Version = DNS_QUERY_REQUEST_VERSION1;
req.pQueryContext = alarm.get();
// Deregistration must pass the exact instance returned by registration.
req.pServiceInstance = enable ? &instance : existing_instance;
req.pRegisterCompletionCallback = register_cb;
DNS_STATUS status {};
if (enable) {
status = _DnsServiceRegister(&req, nullptr);
if (status != DNS_REQUEST_PENDING) {
print_status("DnsServiceRegister()"sv, status);
return -1;
}
}
else {
status = _DnsServiceDeRegister(&req, nullptr);
if (status != DNS_REQUEST_PENDING) {
print_status("DnsServiceDeRegister()"sv, status);
return -1;
}
}
// Wait for register_cb() to deliver the resulting instance (or nullptr on failure).
alarm->wait();
auto registered_instance = alarm->status();
if (enable) {
// Store this instance for later deregistration
existing_instance = registered_instance;
}
else if (registered_instance) {
// Deregistration was successful
_DnsServiceFreeInstance(registered_instance);
existing_instance = nullptr;
}
return registered_instance ? 0 : -1;
}
// RAII wrapper: registers the Sunshine mDNS service on construction and
// unregisters it on destruction (when the deinit_t is torn down at shutdown).
class mdns_registration_t: public ::platf::deinit_t {
public:
mdns_registration_t():
existing_instance(nullptr) {
if (service(true, existing_instance)) {
BOOST_LOG(error) << "Unable to register Sunshine mDNS service"sv;
return;
}
BOOST_LOG(info) << "Registered Sunshine mDNS service"sv;
}
~mdns_registration_t() override {
// Only attempt deregistration if registration actually succeeded.
if (existing_instance) {
if (service(false, existing_instance)) {
BOOST_LOG(error) << "Unable to unregister Sunshine mDNS service"sv;
return;
}
BOOST_LOG(info) << "Unregistered Sunshine mDNS service"sv;
}
}
private:
// Instance handle returned by DnsServiceRegister; required for deregistration.
PDNS_SERVICE_INSTANCE existing_instance;
};
/**
 * @brief Resolve the DnsService* entry points from dnsapi.dll.
 *
 * Uses named reinterpret_cast instead of C-style casts for the FARPROC
 * conversions, per modern C++ guidelines.
 * @param handle Handle to the loaded dnsapi.dll module; freed automatically on failure.
 * @return 0 on success, -1 if any required export is missing.
 */
int
load_funcs(HMODULE handle) {
  // Free the library unless all exports resolve (fg.disable() below).
  auto fg = util::fail_guard([handle]() {
    FreeLibrary(handle);
  });
  _DnsServiceFreeInstance = reinterpret_cast<_DnsServiceFreeInstance_fn>(GetProcAddress(handle, "DnsServiceFreeInstance"));
  _DnsServiceDeRegister = reinterpret_cast<_DnsServiceDeRegister_fn>(GetProcAddress(handle, "DnsServiceDeRegister"));
  _DnsServiceRegister = reinterpret_cast<_DnsServiceRegister_fn>(GetProcAddress(handle, "DnsServiceRegister"));
  if (!(_DnsServiceFreeInstance && _DnsServiceDeRegister && _DnsServiceRegister)) {
    // These exports only exist on Windows 10 1809+; older systems lack mDNS support.
    BOOST_LOG(error) << "mDNS service not available in dnsapi.dll"sv;
    return -1;
  }
  fg.disable();
  return 0;
}
std::unique_ptr<::platf::deinit_t>
start() {
HMODULE handle = LoadLibrary("dnsapi.dll");
if (!handle || load_funcs(handle)) {
BOOST_LOG(error) << "Couldn't load dnsapi.dll, You'll need to add PC manually from Moonlight"sv;
return nullptr;
}
return std::make_unique<mdns_registration_t>();
}
} // namespace platf::publish
| 6,658
|
C++
|
.cpp
| 183
| 32.043716
| 122
| 0.706797
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,014
|
display_base.cpp
|
LizardByte_Sunshine/src/platform/windows/display_base.cpp
|
/**
* @file src/platform/windows/display_base.cpp
* @brief Definitions for the Windows display base code.
*/
#include <cmath>
#include <initguid.h>
#include <thread>
#include <boost/algorithm/string/join.hpp>
#include <boost/process/v1.hpp>
// We have to include boost/process/v1.hpp before display.h due to WinSock.h,
// but that prevents the definition of NTSTATUS so we must define it ourself.
typedef long NTSTATUS;
#include "display.h"
#include "misc.h"
#include "src/config.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/video.h"
namespace platf {
using namespace std::literals;
}
namespace platf::dxgi {
namespace bp = boost::process;
/**
 * @brief DDAPI-specific initialization goes here.
 * @param display The display whose output should be duplicated.
 * @param config The video configuration (unused here).
 * @return 0 on success, -1 on failure.
 */
int
duplication_t::init(display_base_t *display, const ::video::config_t &config) {
HRESULT status;
// Capture format will be determined from the first call to AcquireNextFrame()
display->capture_format = DXGI_FORMAT_UNKNOWN;
// FIXME: Duplicate output on RX580 in combination with DOOM (2016) --> BSOD
{
// IDXGIOutput5 is optional, but can provide improved performance and wide color support
dxgi::output5_t output5 {};
status = display->output->QueryInterface(IID_IDXGIOutput5, (void **) &output5);
if (SUCCEEDED(status)) {
// Ask the display implementation which formats it supports
auto supported_formats = display->get_supported_capture_formats();
if (supported_formats.empty()) {
BOOST_LOG(warning) << "No compatible capture formats for this encoder"sv;
return -1;
}
// We try this twice, in case we still get an error on reinitialization
for (int x = 0; x < 2; ++x) {
// Ensure we can duplicate the current display
syncThreadDesktop();
status = output5->DuplicateOutput1((IUnknown *) display->device.get(), 0, supported_formats.size(), supported_formats.data(), &dup);
if (SUCCEEDED(status)) {
break;
}
std::this_thread::sleep_for(200ms);
}
// We don't retry with DuplicateOutput() because we can hit this codepath when we're racing
// with mode changes and we don't want to accidentally fall back to suboptimal capture if
// we get unlucky and succeed below.
if (FAILED(status)) {
BOOST_LOG(warning) << "DuplicateOutput1 Failed [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
}
else {
BOOST_LOG(warning) << "IDXGIOutput5 is not supported by your OS. Capture performance may be reduced."sv;
dxgi::output1_t output1 {};
status = display->output->QueryInterface(IID_IDXGIOutput1, (void **) &output1);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to query IDXGIOutput1 from the output"sv;
return -1;
}
for (int x = 0; x < 2; ++x) {
// Ensure we can duplicate the current display
syncThreadDesktop();
status = output1->DuplicateOutput((IUnknown *) display->device.get(), &dup);
if (SUCCEEDED(status)) {
break;
}
std::this_thread::sleep_for(200ms);
}
if (FAILED(status)) {
BOOST_LOG(error) << "DuplicateOutput Failed [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
}
}
DXGI_OUTDUPL_DESC dup_desc;
dup->GetDesc(&dup_desc);
BOOST_LOG(info) << "Desktop resolution ["sv << dup_desc.ModeDesc.Width << 'x' << dup_desc.ModeDesc.Height << ']';
BOOST_LOG(info) << "Desktop format ["sv << display->dxgi_format_to_string(dup_desc.ModeDesc.Format) << ']';
// Record the refresh rate so capture pacing can be matched to the display (see capture()).
display->display_refresh_rate = dup_desc.ModeDesc.RefreshRate;
double display_refresh_rate_decimal = (double) display->display_refresh_rate.Numerator / display->display_refresh_rate.Denominator;
BOOST_LOG(info) << "Display refresh rate [" << display_refresh_rate_decimal << "Hz]";
BOOST_LOG(info) << "Requested frame rate [" << display->client_frame_rate << "fps]";
display->display_refresh_rate_rounded = lround(display_refresh_rate_decimal);
return 0;
}
/**
 * @brief Acquire the next duplicated frame, releasing the previously held one first.
 * @param frame_info Receives DXGI metadata about the acquired frame.
 * @param timeout Maximum time AcquireNextFrame() may wait for a new frame.
 * @param res_p Receives the desktop surface resource for the frame.
 * @return capture_e::ok on success; timeout, reinit, or error otherwise.
 */
capture_e
duplication_t::next_frame(DXGI_OUTDUPL_FRAME_INFO &frame_info, std::chrono::milliseconds timeout, resource_t::pointer *res_p) {
auto capture_status = release_frame();
if (capture_status != capture_e::ok) {
return capture_status;
}
auto status = dup->AcquireNextFrame(timeout.count(), &frame_info, res_p);
switch (status) {
case S_OK:
// ProtectedContentMaskedOut seems to semi-randomly be TRUE or FALSE even when protected content
// is on screen the whole time, so we can't just print when it changes. Instead we'll keep track
// of the last time we printed the warning and print another if we haven't printed one recently.
if (frame_info.ProtectedContentMaskedOut && std::chrono::steady_clock::now() > last_protected_content_warning_time + 10s) {
BOOST_LOG(warning) << "Windows is currently blocking DRM-protected content from capture. You may see black regions where this content would be."sv;
last_protected_content_warning_time = std::chrono::steady_clock::now();
}
has_frame = true;
return capture_e::ok;
case DXGI_ERROR_WAIT_TIMEOUT:
return capture_e::timeout;
case WAIT_ABANDONED:
case DXGI_ERROR_ACCESS_LOST:
case DXGI_ERROR_ACCESS_DENIED:
// Lost access to the desktop (secure desktop, mode change, ...); caller must reinitialize.
return capture_e::reinit;
default:
BOOST_LOG(error) << "Couldn't acquire next frame [0x"sv << util::hex(status).to_string_view();
return capture_e::error;
}
}
/**
 * @brief Replace the duplication interface, releasing any held frame first.
 * @param dup_p The new duplication interface pointer (may be nullptr).
 * @return The status of releasing the previously held frame.
 */
capture_e
duplication_t::reset(dup_t::pointer dup_p) {
  const auto release_status = release_frame();
  dup.reset(dup_p);
  return release_status;
}
/**
 * @brief Release the currently held duplication frame, if any.
 * @return capture_e::ok when nothing needs doing or the release succeeded;
 *         reinit or error when the duplication interface is no longer usable.
 */
capture_e
duplication_t::release_frame() {
  if (!has_frame) {
    return capture_e::ok;
  }
  const auto status = dup->ReleaseFrame();
  has_frame = false;
  if (status == S_OK) {
    return capture_e::ok;
  }
  if (status == DXGI_ERROR_INVALID_CALL) {
    // The frame was already returned to DXGI; treat as success.
    BOOST_LOG(warning) << "Duplication frame already released";
    return capture_e::ok;
  }
  if (status == DXGI_ERROR_ACCESS_LOST) {
    return capture_e::reinit;
  }
  BOOST_LOG(error) << "Error while releasing duplication frame [0x"sv << util::hex(status).to_string_view();
  return capture_e::error;
}
// Ensure any frame still held is returned to DXGI before the interface is destroyed.
duplication_t::~duplication_t() {
release_frame();
}
/**
 * @brief Main capture loop: acquires frames, paces them to the client frame rate,
 *        and hands them to the encoder via the provided callbacks.
 * @param push_captured_image_cb Invoked with each captured (or timed-out) image;
 *        returning false stops the loop.
 * @param pull_free_image_cb Supplies free image buffers for snapshot() to fill.
 * @param cursor Whether to include the cursor in captured frames.
 * @return The status that ended the loop (ok, reinit, error, interrupted).
 */
capture_e
display_base_t::capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) {
auto adjust_client_frame_rate = [&]() -> DXGI_RATIONAL {
// Adjust capture frame interval when display refresh rate is not integral but very close to requested fps.
if (display_refresh_rate.Denominator > 1) {
DXGI_RATIONAL candidate = display_refresh_rate;
if (client_frame_rate % display_refresh_rate_rounded == 0) {
candidate.Numerator *= client_frame_rate / display_refresh_rate_rounded;
}
else if (display_refresh_rate_rounded % client_frame_rate == 0) {
candidate.Denominator *= display_refresh_rate_rounded / client_frame_rate;
}
double candidate_rate = (double) candidate.Numerator / candidate.Denominator;
// Can only decrease requested fps, otherwise client may start accumulating frames and suffer increased latency.
if (client_frame_rate > candidate_rate && candidate_rate / client_frame_rate > 0.99) {
BOOST_LOG(info) << "Adjusted capture rate to " << candidate_rate << "fps to better match display";
return candidate;
}
}
return { (uint32_t) client_frame_rate, 1 };
};
DXGI_RATIONAL client_frame_rate_adjusted = adjust_client_frame_rate();
// A "frame pacing group" anchors frame times to the timestamp of its first frame.
std::optional<std::chrono::steady_clock::time_point> frame_pacing_group_start;
uint32_t frame_pacing_group_frames = 0;
// Keep the display awake during capture. If the display goes to sleep during
// capture, best case is that capture stops until it powers back on. However,
// worst case it will trigger us to reinit DD, waking the display back up in
// a neverending cycle of waking and sleeping the display of an idle machine.
SetThreadExecutionState(ES_CONTINUOUS | ES_DISPLAY_REQUIRED);
auto clear_display_required = util::fail_guard([]() {
SetThreadExecutionState(ES_CONTINUOUS);
});
sleep_overshoot_logger.reset();
while (true) {
// This will return false if the HDR state changes or for any number of other
// display or GPU changes. We should reinit to examine the updated state of
// the display subsystem. It is recommended to call this once per frame.
if (!factory->IsCurrent()) {
return platf::capture_e::reinit;
}
platf::capture_e status = capture_e::ok;
std::shared_ptr<img_t> img_out;
// Try to continue frame pacing group, snapshot() is called with zero timeout after waiting for client frame interval
if (frame_pacing_group_start) {
// Compute the target time of the next frame as an exact rational multiple of
// the frame interval, split into whole seconds plus a fractional remainder to
// avoid accumulating rounding error over long sessions.
const uint32_t seconds = (uint64_t) frame_pacing_group_frames * client_frame_rate_adjusted.Denominator / client_frame_rate_adjusted.Numerator;
const uint32_t remainder = (uint64_t) frame_pacing_group_frames * client_frame_rate_adjusted.Denominator % client_frame_rate_adjusted.Numerator;
const auto sleep_target = *frame_pacing_group_start +
std::chrono::nanoseconds(1s) * seconds +
std::chrono::nanoseconds(1s) * remainder / client_frame_rate_adjusted.Numerator;
const auto sleep_period = sleep_target - std::chrono::steady_clock::now();
if (sleep_period <= 0ns) {
// We missed next frame time, invalidating current frame pacing group
frame_pacing_group_start = std::nullopt;
frame_pacing_group_frames = 0;
status = capture_e::timeout;
}
else {
timer->sleep_for(sleep_period);
sleep_overshoot_logger.first_point(sleep_target);
sleep_overshoot_logger.second_point_now_and_log();
status = snapshot(pull_free_image_cb, img_out, 0ms, *cursor);
if (status == capture_e::ok && img_out) {
frame_pacing_group_frames += 1;
}
else {
frame_pacing_group_start = std::nullopt;
frame_pacing_group_frames = 0;
}
}
}
// Start new frame pacing group if necessary, snapshot() is called with non-zero timeout
if (status == capture_e::timeout || (status == capture_e::ok && !frame_pacing_group_start)) {
status = snapshot(pull_free_image_cb, img_out, 200ms, *cursor);
if (status == capture_e::ok && img_out) {
frame_pacing_group_start = img_out->frame_timestamp;
if (!frame_pacing_group_start) {
BOOST_LOG(warning) << "snapshot() provided image without timestamp";
frame_pacing_group_start = std::chrono::steady_clock::now();
}
frame_pacing_group_frames = 1;
}
else if (status == platf::capture_e::timeout) {
// The D3D11 device is protected by an unfair lock that is held the entire time that
// IDXGIOutputDuplication::AcquireNextFrame() is running. This is normally harmless,
// however sometimes the encoding thread needs to interact with our ID3D11Device to
// create dummy images or initialize the shared state that is used to pass textures
// between the capture and encoding ID3D11Devices.
//
// When we're in a state where we're not actively receiving frames regularly, we will
// spend almost 100% of our time in AcquireNextFrame() holding that critical lock.
// Worse still, since it's unfair, we can monopolize it while the encoding thread
// is starved. The encoding thread may acquire it for a few moments across a few
// ID3D11Device calls before losing it again to us for another long time waiting in
// AcquireNextFrame(). The starvation caused by this lock contention causes encoder
// reinitialization to take several seconds instead of a fraction of a second.
//
// To avoid starving the encoding thread, sleep without the lock held for a little
// while each time we reach our max frame timeout. This will only happen when nothing
// is updating the display, so no visible stutter should be introduced by the sleep.
std::this_thread::sleep_for(10ms);
}
}
switch (status) {
case platf::capture_e::reinit:
case platf::capture_e::error:
case platf::capture_e::interrupted:
return status;
case platf::capture_e::timeout:
if (!push_captured_image_cb(std::move(img_out), false)) {
return capture_e::ok;
}
break;
case platf::capture_e::ok:
if (!push_captured_image_cb(std::move(img_out), true)) {
return capture_e::ok;
}
break;
default:
BOOST_LOG(error) << "Unrecognized capture status ["sv << (int) status << ']';
return status;
}
status = release_snapshot();
if (status != platf::capture_e::ok) {
return status;
}
}
// Unreachable: the loop above only exits via return.
return capture_e::ok;
}
bool
set_gpu_preference_on_self(int preference) {
// The GPU preferences key uses app path as the value name.
WCHAR sunshine_path[MAX_PATH];
GetModuleFileNameW(NULL, sunshine_path, ARRAYSIZE(sunshine_path));
WCHAR value_data[128];
swprintf_s(value_data, L"GpuPreference=%d;", preference);
auto status = RegSetKeyValueW(HKEY_CURRENT_USER,
L"Software\\Microsoft\\DirectX\\UserGpuPreferences",
sunshine_path,
REG_SZ,
value_data,
(wcslen(value_data) + 1) * sizeof(WCHAR));
if (status != ERROR_SUCCESS) {
BOOST_LOG(error) << "Failed to set GPU preference: "sv << status;
return false;
}
BOOST_LOG(info) << "Set GPU preference: "sv << preference;
return true;
}
/**
 * @brief Probe each GPU preference with ddprobe.exe and adopt the first one
 *        that can successfully duplicate the given display.
 * @param display_name The display to test duplication against.
 * @param verify_frame_capture Also require ddprobe.exe to capture real frames.
 * @return true when a working preference was found and applied to this process.
 */
bool
validate_and_test_gpu_preference(const std::string &display_name, bool verify_frame_capture) {
std::string cmd = "tools\\ddprobe.exe";
// We start at 1 because 0 is automatic selection which can be overridden by
// the GPU driver control panel options. Since ddprobe.exe can have different
// GPU driver overrides than Sunshine.exe, we want to avoid a scenario where
// autoselection might work for ddprobe.exe but not for us.
for (int i = 1; i < 5; i++) {
// Run the probe tool. It returns the status of DuplicateOutput().
//
// Arg format: [GPU preference] [Display name] [--verify-frame-capture]
HRESULT result;
std::vector<std::string> args = { std::to_string(i), display_name };
try {
if (verify_frame_capture) {
args.emplace_back("--verify-frame-capture");
}
result = bp::system(cmd, bp::args(args), bp::std_out > bp::null, bp::std_err > bp::null);
}
catch (bp::process_error &e) {
BOOST_LOG(error) << "Failed to start ddprobe.exe: "sv << e.what();
return false;
}
BOOST_LOG(info) << "ddprobe.exe " << boost::algorithm::join(args, " ") << " returned 0x"
<< util::hex(result).to_string_view();
// E_ACCESSDENIED can happen at the login screen. If we get this error,
// we know capture would have been supported, because DXGI_ERROR_UNSUPPORTED
// would have been raised first if it wasn't.
if (result == S_OK || result == E_ACCESSDENIED) {
// We found a working GPU preference, so set ourselves to use that.
if (set_gpu_preference_on_self(i)) {
return true;
}
else {
return false;
}
}
}
// If no valid configuration was found, return false
return false;
}
// On hybrid graphics systems, Windows will change the order of GPUs reported by
// DXGI in accordance with the user's GPU preference. If the selected GPU is a
// render-only device with no displays, DXGI will add virtual outputs to the
// that device to avoid confusing applications. While this works properly for most
// applications, it breaks the Desktop Duplication API because DXGI doesn't proxy
// the virtual DXGIOutput to the real GPU it is attached to. When trying to call
// DuplicateOutput() on one of these virtual outputs, it fails with DXGI_ERROR_UNSUPPORTED
// (even if you try sneaky stuff like passing the ID3D11Device for the iGPU and the
// virtual DXGIOutput from the dGPU). Because the GPU preference is once-per-process,
// we spawn a helper tool to probe for us before we set our own GPU preference.
bool
probe_for_gpu_preference(const std::string &display_name) {
static bool set_gpu_preference = false;
// If we've already been through here, there's nothing to do this time.
if (set_gpu_preference) {
return true;
}
// Try probing with different GPU preferences and verify_frame_capture flag
if (validate_and_test_gpu_preference(display_name, true)) {
set_gpu_preference = true;
return true;
}
// If no valid configuration was found, try again with verify_frame_capture == false
if (validate_and_test_gpu_preference(display_name, false)) {
set_gpu_preference = true;
return true;
}
// If neither worked, return false
return false;
}
/**
 * @brief Tests to determine if the Desktop Duplication API can capture the given output.
 * @details When testing for enumeration only, we avoid resyncing the thread desktop.
 * @param adapter The DXGI adapter to use for capture.
 * @param output The DXGI output to capture.
 * @param enumeration_only Specifies whether this test is occurring for display enumeration.
 * @return true if DuplicateOutput() succeeded on this output.
 */
bool
test_dxgi_duplication(adapter_t &adapter, output_t &output, bool enumeration_only) {
  D3D_FEATURE_LEVEL featureLevels[] {
    D3D_FEATURE_LEVEL_11_1,
    D3D_FEATURE_LEVEL_11_0,
    D3D_FEATURE_LEVEL_10_1,
    D3D_FEATURE_LEVEL_10_0,
    D3D_FEATURE_LEVEL_9_3,
    D3D_FEATURE_LEVEL_9_2,
    D3D_FEATURE_LEVEL_9_1
  };
  // A temporary device is required because DuplicateOutput() must be bound to one.
  device_t device;
  auto status = D3D11CreateDevice(
    adapter.get(),
    D3D_DRIVER_TYPE_UNKNOWN,
    nullptr,
    D3D11_CREATE_DEVICE_FLAGS,
    featureLevels, sizeof(featureLevels) / sizeof(D3D_FEATURE_LEVEL),
    D3D11_SDK_VERSION,
    &device,
    nullptr,
    nullptr);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Failed to create D3D11 device for DD test [0x"sv << util::hex(status).to_string_view() << ']';
    return false;
  }
  // IDXGIOutput1 is the interface that exposes DuplicateOutput().
  output1_t output1;
  status = output->QueryInterface(IID_IDXGIOutput1, (void **) &output1);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Failed to query IDXGIOutput1 from the output"sv;
    return false;
  }
  // Check if we can use the Desktop Duplication API on this output.
  // Up to two attempts are made, with a short sleep between failures.
  for (int x = 0; x < 2; ++x) {
    dup_t dup;
    // Only resynchronize the thread desktop when not enumerating displays.
    // During enumeration, the caller will do this only once to ensure
    // a consistent view of available outputs.
    if (!enumeration_only) {
      syncThreadDesktop();
    }
    status = output1->DuplicateOutput((IUnknown *) device.get(), &dup);
    if (SUCCEEDED(status)) {
      return true;
    }
    // If we're not resyncing the thread desktop and we don't have permission to
    // capture the current desktop, just bail immediately. Retrying won't help.
    if (enumeration_only && status == E_ACCESSDENIED) {
      break;
    }
    else {
      std::this_thread::sleep_for(200ms);
    }
  }
  BOOST_LOG(error) << "DuplicateOutput() test failed [0x"sv << util::hex(status).to_string_view() << ']';
  return false;
}
/**
 * @brief Initializes the DXGI/D3D11 state used for display capture.
 * @details Selects the adapter/output matching the configured names, creates the
 *          D3D11 device, raises GPU scheduling priority, and logs device/HDR info.
 * @param config Stream configuration; `config.framerate` is stored as the client frame rate.
 * @param display_name The display to capture (empty matches the first capturable output).
 * @return 0 on success, -1 on failure.
 */
int
display_base_t::init(const ::video::config_t &config, const std::string &display_name) {
  // NOTE(review): this once_flag is function-local and NOT static, so the
  // call_once body actually runs on every init() call. It was presumably
  // intended to be `static` to run once per process — confirm intent.
  std::once_flag windows_cpp_once_flag;
  std::call_once(windows_cpp_once_flag, []() {
    // Enable per-monitor-v2 DPI awareness via dynamic lookup, since the API
    // may not exist on older Windows versions.
    DECLARE_HANDLE(DPI_AWARENESS_CONTEXT);
    typedef BOOL (*User32_SetProcessDpiAwarenessContext)(DPI_AWARENESS_CONTEXT value);
    auto user32 = LoadLibraryA("user32.dll");
    auto f = (User32_SetProcessDpiAwarenessContext) GetProcAddress(user32, "SetProcessDpiAwarenessContext");
    if (f) {
      f(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2);
    }
    FreeLibrary(user32);
  });
  // Get rectangle of full desktop for absolute mouse coordinates
  env_width = GetSystemMetrics(SM_CXVIRTUALSCREEN);
  env_height = GetSystemMetrics(SM_CYVIRTUALSCREEN);
  HRESULT status;
  // We must set the GPU preference before calling any DXGI APIs!
  if (!probe_for_gpu_preference(display_name)) {
    BOOST_LOG(warning) << "Failed to set GPU preference. Capture may not work!"sv;
  }
  status = CreateDXGIFactory1(IID_IDXGIFactory1, (void **) &factory);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Failed to create DXGIFactory1 [0x"sv << util::hex(status).to_string_view() << ']';
    return -1;
  }
  // Names are compared in wide form against the DXGI descriptors below.
  auto adapter_name = from_utf8(config::video.adapter_name);
  auto output_name = from_utf8(display_name);
  adapter_t::pointer adapter_p;
  // Two passes: if no output is found on the first pass, the display is
  // forced on and the enumeration is retried once.
  for (int tries = 0; tries < 2; ++tries) {
    for (int x = 0; factory->EnumAdapters1(x, &adapter_p) != DXGI_ERROR_NOT_FOUND; ++x) {
      dxgi::adapter_t adapter_tmp { adapter_p };
      DXGI_ADAPTER_DESC1 adapter_desc;
      adapter_tmp->GetDesc1(&adapter_desc);
      // Skip adapters that don't match the configured adapter name (if any)
      if (!adapter_name.empty() && adapter_desc.Description != adapter_name) {
        continue;
      }
      dxgi::output_t::pointer output_p;
      for (int y = 0; adapter_tmp->EnumOutputs(y, &output_p) != DXGI_ERROR_NOT_FOUND; ++y) {
        dxgi::output_t output_tmp { output_p };
        DXGI_OUTPUT_DESC desc;
        output_tmp->GetDesc(&desc);
        // Skip outputs that don't match the requested display name (if any)
        if (!output_name.empty() && desc.DeviceName != output_name) {
          continue;
        }
        // Accept the output only if it is attached and capturable via DD
        if (desc.AttachedToDesktop && test_dxgi_duplication(adapter_tmp, output_tmp, false)) {
          output = std::move(output_tmp);
          offset_x = desc.DesktopCoordinates.left;
          offset_y = desc.DesktopCoordinates.top;
          width = desc.DesktopCoordinates.right - offset_x;
          height = desc.DesktopCoordinates.bottom - offset_y;
          display_rotation = desc.Rotation;
          // For 90/270 degree rotations, the pre-rotation dimensions are swapped
          if (display_rotation == DXGI_MODE_ROTATION_ROTATE90 ||
              display_rotation == DXGI_MODE_ROTATION_ROTATE270) {
            width_before_rotation = height;
            height_before_rotation = width;
          }
          else {
            width_before_rotation = width;
            height_before_rotation = height;
          }
          // left and bottom may be negative, yet absolute mouse coordinates start at 0x0
          // Ensure offset starts at 0x0
          offset_x -= GetSystemMetrics(SM_XVIRTUALSCREEN);
          offset_y -= GetSystemMetrics(SM_YVIRTUALSCREEN);
          break;
        }
      }
      if (output) {
        adapter = std::move(adapter_tmp);
        break;
      }
    }
    if (output) {
      break;
    }
    // If we made it here without finding an output, try to power on the display and retry.
    if (tries == 0) {
      SetThreadExecutionState(ES_DISPLAY_REQUIRED);
      Sleep(500);
    }
  }
  if (!output) {
    BOOST_LOG(error) << "Failed to locate an output device"sv;
    return -1;
  }
  D3D_FEATURE_LEVEL featureLevels[] {
    D3D_FEATURE_LEVEL_11_1,
    D3D_FEATURE_LEVEL_11_0,
    D3D_FEATURE_LEVEL_10_1,
    D3D_FEATURE_LEVEL_10_0,
    D3D_FEATURE_LEVEL_9_3,
    D3D_FEATURE_LEVEL_9_2,
    D3D_FEATURE_LEVEL_9_1
  };
  // Grab a raw IDXGIAdapter pointer for device creation
  status = adapter->QueryInterface(IID_IDXGIAdapter, (void **) &adapter_p);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Failed to query IDXGIAdapter interface"sv;
    return -1;
  }
  status = D3D11CreateDevice(
    adapter_p,
    D3D_DRIVER_TYPE_UNKNOWN,
    nullptr,
    D3D11_CREATE_DEVICE_FLAGS,
    featureLevels, sizeof(featureLevels) / sizeof(D3D_FEATURE_LEVEL),
    D3D11_SDK_VERSION,
    &device,
    &feature_level,
    &device_ctx);
  // Balance the QueryInterface() reference above
  adapter_p->Release();
  if (FAILED(status)) {
    BOOST_LOG(error) << "Failed to create D3D11 device [0x"sv << util::hex(status).to_string_view() << ']';
    return -1;
  }
  DXGI_ADAPTER_DESC adapter_desc;
  adapter->GetDesc(&adapter_desc);
  auto description = to_utf8(adapter_desc.Description);
  BOOST_LOG(info)
    << std::endl
    << "Device Description : " << description << std::endl
    << "Device Vendor ID   : 0x"sv << util::hex(adapter_desc.VendorId).to_string_view() << std::endl
    << "Device Device ID   : 0x"sv << util::hex(adapter_desc.DeviceId).to_string_view() << std::endl
    << "Device Video Mem   : "sv << adapter_desc.DedicatedVideoMemory / 1048576 << " MiB"sv << std::endl
    << "Device Sys Mem     : "sv << adapter_desc.DedicatedSystemMemory / 1048576 << " MiB"sv << std::endl
    << "Share Sys Mem      : "sv << adapter_desc.SharedSystemMemory / 1048576 << " MiB"sv << std::endl
    << "Feature Level      : 0x"sv << util::hex(feature_level).to_string_view() << std::endl
    << "Capture size       : "sv << width << 'x' << height << std::endl
    << "Offset             : "sv << offset_x << 'x' << offset_y << std::endl
    << "Virtual Desktop    : "sv << env_width << 'x' << env_height;
  // Bump up thread priority
  {
    const DWORD flags = TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY;
    TOKEN_PRIVILEGES tp;
    HANDLE token;
    LUID val;
    // Enable SE_INC_BASE_PRIORITY so we may raise GPU scheduling priority below
    if (OpenProcessToken(GetCurrentProcess(), flags, &token) &&
        !!LookupPrivilegeValue(NULL, SE_INC_BASE_PRIORITY_NAME, &val)) {
      tp.PrivilegeCount = 1;
      tp.Privileges[0].Luid = val;
      tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
      if (!AdjustTokenPrivileges(token, false, &tp, sizeof(tp), NULL, NULL)) {
        BOOST_LOG(warning) << "Could not set privilege to increase GPU priority";
      }
    }
    // NOTE(review): token is uninitialized if OpenProcessToken() failed above,
    // so this CloseHandle() may receive a garbage handle — confirm and guard.
    CloseHandle(token);
    HMODULE gdi32 = GetModuleHandleA("GDI32");
    if (gdi32) {
      // Queries the kernel-mode driver caps to determine whether
      // hardware-accelerated GPU scheduling (HAGS) is enabled for the adapter.
      auto check_hags = [&](const LUID &adapter) -> bool {
        auto d3dkmt_open_adapter = (PD3DKMTOpenAdapterFromLuid) GetProcAddress(gdi32, "D3DKMTOpenAdapterFromLuid");
        auto d3dkmt_query_adapter_info = (PD3DKMTQueryAdapterInfo) GetProcAddress(gdi32, "D3DKMTQueryAdapterInfo");
        auto d3dkmt_close_adapter = (PD3DKMTCloseAdapter) GetProcAddress(gdi32, "D3DKMTCloseAdapter");
        if (!d3dkmt_open_adapter || !d3dkmt_query_adapter_info || !d3dkmt_close_adapter) {
          BOOST_LOG(error) << "Couldn't load d3dkmt functions from gdi32.dll to determine GPU HAGS status";
          return false;
        }
        D3DKMT_OPENADAPTERFROMLUID d3dkmt_adapter = { adapter };
        if (FAILED(d3dkmt_open_adapter(&d3dkmt_adapter))) {
          BOOST_LOG(error) << "D3DKMTOpenAdapterFromLuid() failed while trying to determine GPU HAGS status";
          return false;
        }
        bool result;
        D3DKMT_WDDM_2_7_CAPS d3dkmt_adapter_caps = {};
        D3DKMT_QUERYADAPTERINFO d3dkmt_adapter_info = {};
        d3dkmt_adapter_info.hAdapter = d3dkmt_adapter.hAdapter;
        d3dkmt_adapter_info.Type = KMTQAITYPE_WDDM_2_7_CAPS;
        d3dkmt_adapter_info.pPrivateDriverData = &d3dkmt_adapter_caps;
        d3dkmt_adapter_info.PrivateDriverDataSize = sizeof(d3dkmt_adapter_caps);
        if (SUCCEEDED(d3dkmt_query_adapter_info(&d3dkmt_adapter_info))) {
          result = d3dkmt_adapter_caps.HwSchEnabled;
        }
        else {
          BOOST_LOG(warning) << "D3DKMTQueryAdapterInfo() failed while trying to determine GPU HAGS status";
          result = false;
        }
        D3DKMT_CLOSEADAPTER d3dkmt_close_adapter_wrap = { d3dkmt_adapter.hAdapter };
        if (FAILED(d3dkmt_close_adapter(&d3dkmt_close_adapter_wrap))) {
          BOOST_LOG(error) << "D3DKMTCloseAdapter() failed while trying to determine GPU HAGS status";
        }
        return result;
      };
      auto d3dkmt_set_process_priority = (PD3DKMTSetProcessSchedulingPriorityClass) GetProcAddress(gdi32, "D3DKMTSetProcessSchedulingPriorityClass");
      if (d3dkmt_set_process_priority) {
        auto priority = D3DKMT_SCHEDULINGPRIORITYCLASS_REALTIME;
        bool hags_enabled = check_hags(adapter_desc.AdapterLuid);
        if (adapter_desc.VendorId == 0x10DE) {
          // As of 2023.07, NVIDIA driver has unfixed bug(s) where "realtime" can cause unrecoverable encoding freeze or outright driver crash
          // This issue happens more frequently with HAGS, in DX12 games or when VRAM is filled close to max capacity
          // Track OBS to see if they find better workaround or NVIDIA fixes it on their end, they seem to be in communication
          if (hags_enabled && !config::video.nv_realtime_hags) priority = D3DKMT_SCHEDULINGPRIORITYCLASS_HIGH;
        }
        BOOST_LOG(info) << "Active GPU has HAGS " << (hags_enabled ? "enabled" : "disabled");
        BOOST_LOG(info) << "Using " << (priority == D3DKMT_SCHEDULINGPRIORITYCLASS_HIGH ? "high" : "realtime") << " GPU priority";
        if (FAILED(d3dkmt_set_process_priority(GetCurrentProcess(), priority))) {
          BOOST_LOG(warning) << "Failed to adjust GPU priority. Please run application as administrator for optimal performance.";
        }
      }
      else {
        BOOST_LOG(error) << "Couldn't load D3DKMTSetProcessSchedulingPriorityClass function from gdi32.dll to adjust GPU priority";
      }
    }
    dxgi::dxgi_t dxgi;
    status = device->QueryInterface(IID_IDXGIDevice, (void **) &dxgi);
    if (FAILED(status)) {
      BOOST_LOG(warning) << "Failed to query DXGI interface from device [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    // 7 is the maximum GPU thread priority
    status = dxgi->SetGPUThreadPriority(7);
    if (FAILED(status)) {
      BOOST_LOG(warning) << "Failed to increase capture GPU thread priority. Please run application as administrator for optimal performance.";
    }
  }
  // Try to reduce latency
  {
    dxgi::dxgi1_t dxgi {};
    status = device->QueryInterface(IID_IDXGIDevice, (void **) &dxgi);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Failed to query DXGI interface from device [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    status = dxgi->SetMaximumFrameLatency(1);
    if (FAILED(status)) {
      BOOST_LOG(warning) << "Failed to set maximum frame latency [0x"sv << util::hex(status).to_string_view() << ']';
    }
  }
  client_frame_rate = config.framerate;
  // IDXGIOutput6 exposes the extended (HDR-capable) output descriptor; this is
  // informational only, so a failed QueryInterface is not an error.
  dxgi::output6_t output6 {};
  status = output->QueryInterface(IID_IDXGIOutput6, (void **) &output6);
  if (SUCCEEDED(status)) {
    DXGI_OUTPUT_DESC1 desc1;
    output6->GetDesc1(&desc1);
    BOOST_LOG(info)
      << std::endl
      << "Colorspace         : "sv << colorspace_to_string(desc1.ColorSpace) << std::endl
      << "Bits Per Color     : "sv << desc1.BitsPerColor << std::endl
      << "Red Primary        : ["sv << desc1.RedPrimary[0] << ',' << desc1.RedPrimary[1] << ']' << std::endl
      << "Green Primary      : ["sv << desc1.GreenPrimary[0] << ',' << desc1.GreenPrimary[1] << ']' << std::endl
      << "Blue Primary       : ["sv << desc1.BluePrimary[0] << ',' << desc1.BluePrimary[1] << ']' << std::endl
      << "White Point        : ["sv << desc1.WhitePoint[0] << ',' << desc1.WhitePoint[1] << ']' << std::endl
      << "Min Luminance      : "sv << desc1.MinLuminance << " nits"sv << std::endl
      << "Max Luminance      : "sv << desc1.MaxLuminance << " nits"sv << std::endl
      << "Max Full Luminance : "sv << desc1.MaxFullFrameLuminance << " nits"sv;
  }
  // The high precision timer must have been created before init() is called
  if (!timer || !*timer) {
    BOOST_LOG(error) << "Uninitialized high precision timer";
    return -1;
  }
  return 0;
}
/**
 * @brief Checks whether the captured output is currently in HDR mode.
 * @return true if the output colorspace is HDR10 (PQ / BT.2020 full range).
 */
bool
display_base_t::is_hdr() {
  dxgi::output6_t output6 {};
  if (FAILED(output->QueryInterface(IID_IDXGIOutput6, (void **) &output6))) {
    BOOST_LOG(warning) << "Failed to query IDXGIOutput6 from the output"sv;
    return false;
  }
  DXGI_OUTPUT_DESC1 desc1;
  output6->GetDesc1(&desc1);
  // HDR is signalled by the output being in the G2084 (PQ) BT.2020 colorspace
  return desc1.ColorSpace == DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020;
}
/**
 * @brief Fills in HDR metadata for the captured output.
 * @details Luminance values come from the output descriptor; primaries are
 *          overridden with fixed Rec. 2020 / D65 values (see comment below).
 * @param metadata Receives the populated metadata (zeroed on entry).
 * @return true on success, false if IDXGIOutput6 is unavailable.
 */
bool
display_base_t::get_hdr_metadata(SS_HDR_METADATA &metadata) {
  dxgi::output6_t output6 {};
  std::memset(&metadata, 0, sizeof(metadata));
  auto status = output->QueryInterface(IID_IDXGIOutput6, (void **) &output6);
  if (FAILED(status)) {
    BOOST_LOG(warning) << "Failed to query IDXGIOutput6 from the output"sv;
    return false;
  }
  DXGI_OUTPUT_DESC1 desc1;
  output6->GetDesc1(&desc1);
  // The primaries reported here seem to correspond to scRGB (Rec. 709)
  // which we then convert to Rec 2020 in our scRGB FP16 -> PQ shader
  // prior to encoding. It's not clear to me if we're supposed to report
  // the primaries of the original colorspace or the one we've converted
  // it to, but let's just report Rec 2020 primaries and D65 white level
  // to avoid confusing clients by reporting Rec 709 primaries with a
  // Rec 2020 colorspace. It seems like most clients ignore the primaries
  // in the metadata anyway (luminance range is most important).
  desc1.RedPrimary[0] = 0.708f;
  desc1.RedPrimary[1] = 0.292f;
  desc1.GreenPrimary[0] = 0.170f;
  desc1.GreenPrimary[1] = 0.797f;
  desc1.BluePrimary[0] = 0.131f;
  desc1.BluePrimary[1] = 0.046f;
  desc1.WhitePoint[0] = 0.3127f;
  desc1.WhitePoint[1] = 0.3290f;
  // Chromaticity coordinates are scaled by 50000 per the HDR metadata convention
  metadata.displayPrimaries[0].x = desc1.RedPrimary[0] * 50000;
  metadata.displayPrimaries[0].y = desc1.RedPrimary[1] * 50000;
  metadata.displayPrimaries[1].x = desc1.GreenPrimary[0] * 50000;
  metadata.displayPrimaries[1].y = desc1.GreenPrimary[1] * 50000;
  metadata.displayPrimaries[2].x = desc1.BluePrimary[0] * 50000;
  metadata.displayPrimaries[2].y = desc1.BluePrimary[1] * 50000;
  metadata.whitePoint.x = desc1.WhitePoint[0] * 50000;
  metadata.whitePoint.y = desc1.WhitePoint[1] * 50000;
  metadata.maxDisplayLuminance = desc1.MaxLuminance;
  // Min luminance is expressed in units of 0.0001 nits
  metadata.minDisplayLuminance = desc1.MinLuminance * 10000;
  // These are content-specific metadata parameters that this interface doesn't give us
  metadata.maxContentLightLevel = 0;
  metadata.maxFrameAverageLightLevel = 0;
  metadata.maxFullFrameLuminance = desc1.MaxFullFrameLuminance;
  return true;
}
// Lookup table mapping DXGI_FORMAT enum values (used as indices) to their names.
// The run of NULLs covers the reserved/undefined gap between
// DXGI_FORMAT_B4G4R4A4_UNORM and DXGI_FORMAT_P208.
const char *format_str[] = {
  "DXGI_FORMAT_UNKNOWN",
  "DXGI_FORMAT_R32G32B32A32_TYPELESS",
  "DXGI_FORMAT_R32G32B32A32_FLOAT",
  "DXGI_FORMAT_R32G32B32A32_UINT",
  "DXGI_FORMAT_R32G32B32A32_SINT",
  "DXGI_FORMAT_R32G32B32_TYPELESS",
  "DXGI_FORMAT_R32G32B32_FLOAT",
  "DXGI_FORMAT_R32G32B32_UINT",
  "DXGI_FORMAT_R32G32B32_SINT",
  "DXGI_FORMAT_R16G16B16A16_TYPELESS",
  "DXGI_FORMAT_R16G16B16A16_FLOAT",
  "DXGI_FORMAT_R16G16B16A16_UNORM",
  "DXGI_FORMAT_R16G16B16A16_UINT",
  "DXGI_FORMAT_R16G16B16A16_SNORM",
  "DXGI_FORMAT_R16G16B16A16_SINT",
  "DXGI_FORMAT_R32G32_TYPELESS",
  "DXGI_FORMAT_R32G32_FLOAT",
  "DXGI_FORMAT_R32G32_UINT",
  "DXGI_FORMAT_R32G32_SINT",
  "DXGI_FORMAT_R32G8X24_TYPELESS",
  "DXGI_FORMAT_D32_FLOAT_S8X24_UINT",
  "DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS",
  "DXGI_FORMAT_X32_TYPELESS_G8X24_UINT",
  "DXGI_FORMAT_R10G10B10A2_TYPELESS",
  "DXGI_FORMAT_R10G10B10A2_UNORM",
  "DXGI_FORMAT_R10G10B10A2_UINT",
  "DXGI_FORMAT_R11G11B10_FLOAT",
  "DXGI_FORMAT_R8G8B8A8_TYPELESS",
  "DXGI_FORMAT_R8G8B8A8_UNORM",
  "DXGI_FORMAT_R8G8B8A8_UNORM_SRGB",
  "DXGI_FORMAT_R8G8B8A8_UINT",
  "DXGI_FORMAT_R8G8B8A8_SNORM",
  "DXGI_FORMAT_R8G8B8A8_SINT",
  "DXGI_FORMAT_R16G16_TYPELESS",
  "DXGI_FORMAT_R16G16_FLOAT",
  "DXGI_FORMAT_R16G16_UNORM",
  "DXGI_FORMAT_R16G16_UINT",
  "DXGI_FORMAT_R16G16_SNORM",
  "DXGI_FORMAT_R16G16_SINT",
  "DXGI_FORMAT_R32_TYPELESS",
  "DXGI_FORMAT_D32_FLOAT",
  "DXGI_FORMAT_R32_FLOAT",
  "DXGI_FORMAT_R32_UINT",
  "DXGI_FORMAT_R32_SINT",
  "DXGI_FORMAT_R24G8_TYPELESS",
  "DXGI_FORMAT_D24_UNORM_S8_UINT",
  "DXGI_FORMAT_R24_UNORM_X8_TYPELESS",
  "DXGI_FORMAT_X24_TYPELESS_G8_UINT",
  "DXGI_FORMAT_R8G8_TYPELESS",
  "DXGI_FORMAT_R8G8_UNORM",
  "DXGI_FORMAT_R8G8_UINT",
  "DXGI_FORMAT_R8G8_SNORM",
  "DXGI_FORMAT_R8G8_SINT",
  "DXGI_FORMAT_R16_TYPELESS",
  "DXGI_FORMAT_R16_FLOAT",
  "DXGI_FORMAT_D16_UNORM",
  "DXGI_FORMAT_R16_UNORM",
  "DXGI_FORMAT_R16_UINT",
  "DXGI_FORMAT_R16_SNORM",
  "DXGI_FORMAT_R16_SINT",
  "DXGI_FORMAT_R8_TYPELESS",
  "DXGI_FORMAT_R8_UNORM",
  "DXGI_FORMAT_R8_UINT",
  "DXGI_FORMAT_R8_SNORM",
  "DXGI_FORMAT_R8_SINT",
  "DXGI_FORMAT_A8_UNORM",
  "DXGI_FORMAT_R1_UNORM",
  "DXGI_FORMAT_R9G9B9E5_SHAREDEXP",
  "DXGI_FORMAT_R8G8_B8G8_UNORM",
  "DXGI_FORMAT_G8R8_G8B8_UNORM",
  "DXGI_FORMAT_BC1_TYPELESS",
  "DXGI_FORMAT_BC1_UNORM",
  "DXGI_FORMAT_BC1_UNORM_SRGB",
  "DXGI_FORMAT_BC2_TYPELESS",
  "DXGI_FORMAT_BC2_UNORM",
  "DXGI_FORMAT_BC2_UNORM_SRGB",
  "DXGI_FORMAT_BC3_TYPELESS",
  "DXGI_FORMAT_BC3_UNORM",
  "DXGI_FORMAT_BC3_UNORM_SRGB",
  "DXGI_FORMAT_BC4_TYPELESS",
  "DXGI_FORMAT_BC4_UNORM",
  "DXGI_FORMAT_BC4_SNORM",
  "DXGI_FORMAT_BC5_TYPELESS",
  "DXGI_FORMAT_BC5_UNORM",
  "DXGI_FORMAT_BC5_SNORM",
  "DXGI_FORMAT_B5G6R5_UNORM",
  "DXGI_FORMAT_B5G5R5A1_UNORM",
  "DXGI_FORMAT_B8G8R8A8_UNORM",
  "DXGI_FORMAT_B8G8R8X8_UNORM",
  "DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM",
  "DXGI_FORMAT_B8G8R8A8_TYPELESS",
  "DXGI_FORMAT_B8G8R8A8_UNORM_SRGB",
  "DXGI_FORMAT_B8G8R8X8_TYPELESS",
  "DXGI_FORMAT_B8G8R8X8_UNORM_SRGB",
  "DXGI_FORMAT_BC6H_TYPELESS",
  "DXGI_FORMAT_BC6H_UF16",
  "DXGI_FORMAT_BC6H_SF16",
  "DXGI_FORMAT_BC7_TYPELESS",
  "DXGI_FORMAT_BC7_UNORM",
  "DXGI_FORMAT_BC7_UNORM_SRGB",
  "DXGI_FORMAT_AYUV",
  "DXGI_FORMAT_Y410",
  "DXGI_FORMAT_Y416",
  "DXGI_FORMAT_NV12",
  "DXGI_FORMAT_P010",
  "DXGI_FORMAT_P016",
  "DXGI_FORMAT_420_OPAQUE",
  "DXGI_FORMAT_YUY2",
  "DXGI_FORMAT_Y210",
  "DXGI_FORMAT_Y216",
  "DXGI_FORMAT_NV11",
  "DXGI_FORMAT_AI44",
  "DXGI_FORMAT_IA44",
  "DXGI_FORMAT_P8",
  "DXGI_FORMAT_A8P8",
  "DXGI_FORMAT_B4G4R4A4_UNORM",
  NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  "DXGI_FORMAT_P208",
  "DXGI_FORMAT_V208",
  "DXGI_FORMAT_V408"
};
/**
 * @brief Converts a DXGI_FORMAT value into a human-readable string.
 * @param format The DXGI format value.
 * @return The format name, or "UNKNOWN" for values without an entry in the table.
 */
const char *
display_base_t::dxgi_format_to_string(DXGI_FORMAT format) {
  // Guard against values past the end of the table and against the reserved
  // NULL gap entries to avoid out-of-bounds reads and NULL returns.
  if ((size_t) format < ARRAYSIZE(format_str) && format_str[format]) {
    return format_str[format];
  }
  return "UNKNOWN";
}
/**
 * @brief Converts a DXGI colorspace value into a human-readable string.
 * @param type The DXGI colorspace value.
 * @return The colorspace name, or "UNKNOWN" for out-of-range values.
 */
const char *
display_base_t::colorspace_to_string(DXGI_COLOR_SPACE_TYPE type) {
  // Constant data: make it static constexpr so the pointer table isn't
  // rebuilt on the stack for every call.
  static constexpr const char *type_str[] = {
    "DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709",
    "DXGI_COLOR_SPACE_RGB_FULL_G10_NONE_P709",
    "DXGI_COLOR_SPACE_RGB_STUDIO_G22_NONE_P709",
    "DXGI_COLOR_SPACE_RGB_STUDIO_G22_NONE_P2020",
    "DXGI_COLOR_SPACE_RESERVED",
    "DXGI_COLOR_SPACE_YCBCR_FULL_G22_NONE_P709_X601",
    "DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P601",
    "DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P601",
    "DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P709",
    "DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P709",
    "DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P2020",
    "DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P2020",
    "DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020",
    "DXGI_COLOR_SPACE_YCBCR_STUDIO_G2084_LEFT_P2020",
    "DXGI_COLOR_SPACE_RGB_STUDIO_G2084_NONE_P2020",
    "DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_TOPLEFT_P2020",
    "DXGI_COLOR_SPACE_YCBCR_STUDIO_G2084_TOPLEFT_P2020",
    "DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P2020",
    "DXGI_COLOR_SPACE_YCBCR_STUDIO_GHLG_TOPLEFT_P2020",
    "DXGI_COLOR_SPACE_YCBCR_FULL_GHLG_TOPLEFT_P2020",
    "DXGI_COLOR_SPACE_RGB_STUDIO_G24_NONE_P709",
    "DXGI_COLOR_SPACE_RGB_STUDIO_G24_NONE_P2020",
    "DXGI_COLOR_SPACE_YCBCR_STUDIO_G24_LEFT_P709",
    "DXGI_COLOR_SPACE_YCBCR_STUDIO_G24_LEFT_P2020",
    "DXGI_COLOR_SPACE_YCBCR_STUDIO_G24_TOPLEFT_P2020",
  };
  if (type < ARRAYSIZE(type_str)) {
    return type_str[type];
  }
  else {
    return "UNKNOWN";
  }
}
} // namespace platf::dxgi
namespace platf {
/**
 * Pick a display adapter and capture method.
 * @param hwdevice_type enables possible use of hardware encoder
 */
std::shared_ptr<display_t>
display(mem_type_e hwdevice_type, const std::string &display_name, const video::config_t &config) {
  // Construct a capture backend and keep it only if initialization succeeds.
  auto try_init = [&](auto disp) -> std::shared_ptr<display_t> {
    if (disp->init(config, display_name) == 0) {
      return disp;
    }
    return nullptr;
  };
  const auto &requested = config::video.capture;
  // Desktop Duplication API first (or exclusively, if explicitly requested)
  if (requested == "ddx" || requested.empty()) {
    if (hwdevice_type == mem_type_e::dxgi) {
      if (auto disp = try_init(std::make_shared<dxgi::display_ddup_vram_t>())) {
        return disp;
      }
    }
    else if (hwdevice_type == mem_type_e::system) {
      if (auto disp = try_init(std::make_shared<dxgi::display_ddup_ram_t>())) {
        return disp;
      }
    }
  }
  // Windows.Graphics.Capture as the alternative
  if (requested == "wgc" || requested.empty()) {
    if (hwdevice_type == mem_type_e::dxgi) {
      if (auto disp = try_init(std::make_shared<dxgi::display_wgc_vram_t>())) {
        return disp;
      }
    }
    else if (hwdevice_type == mem_type_e::system) {
      if (auto disp = try_init(std::make_shared<dxgi::display_wgc_ram_t>())) {
        return disp;
      }
    }
  }
  // ddx and wgc failed
  return nullptr;
}
/**
 * @brief Enumerates the names of all displays that can be captured.
 * @details Only outputs that are attached to the desktop and pass a Desktop
 *          Duplication test are included.
 * @return UTF-8 display device names (e.g. \\.\DISPLAY1).
 */
std::vector<std::string>
display_names(mem_type_e) {
  std::vector<std::string> display_names;
  HRESULT status;
  BOOST_LOG(debug) << "Detecting monitors..."sv;
  // We must set the GPU preference before calling any DXGI APIs!
  if (!dxgi::probe_for_gpu_preference(config::video.output_name)) {
    BOOST_LOG(warning) << "Failed to set GPU preference. Capture may not work!"sv;
  }
  // We sync the thread desktop once before we start the enumeration process
  // to ensure test_dxgi_duplication() returns consistent results for all GPUs
  // even if the current desktop changes during our enumeration process.
  // It is critical that we either fully succeed in enumeration or fully fail,
  // otherwise it can lead to the capture code switching monitors unexpectedly.
  syncThreadDesktop();
  dxgi::factory1_t factory;
  status = CreateDXGIFactory1(IID_IDXGIFactory1, (void **) &factory);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Failed to create DXGIFactory1 [0x"sv << util::hex(status).to_string_view() << ']';
    return {};
  }
  dxgi::adapter_t adapter;
  // Walk every adapter and every output attached to it
  for (int x = 0; factory->EnumAdapters1(x, &adapter) != DXGI_ERROR_NOT_FOUND; ++x) {
    DXGI_ADAPTER_DESC1 adapter_desc;
    adapter->GetDesc1(&adapter_desc);
    BOOST_LOG(debug)
      << std::endl
      << "====== ADAPTER ====="sv << std::endl
      << "Device Name      : "sv << to_utf8(adapter_desc.Description) << std::endl
      << "Device Vendor ID : 0x"sv << util::hex(adapter_desc.VendorId).to_string_view() << std::endl
      << "Device Device ID : 0x"sv << util::hex(adapter_desc.DeviceId).to_string_view() << std::endl
      << "Device Video Mem : "sv << adapter_desc.DedicatedVideoMemory / 1048576 << " MiB"sv << std::endl
      << "Device Sys Mem   : "sv << adapter_desc.DedicatedSystemMemory / 1048576 << " MiB"sv << std::endl
      << "Share Sys Mem    : "sv << adapter_desc.SharedSystemMemory / 1048576 << " MiB"sv << std::endl
      << std::endl
      << "    ====== OUTPUT ======"sv << std::endl;
    dxgi::output_t::pointer output_p {};
    for (int y = 0; adapter->EnumOutputs(y, &output_p) != DXGI_ERROR_NOT_FOUND; ++y) {
      dxgi::output_t output { output_p };
      DXGI_OUTPUT_DESC desc;
      output->GetDesc(&desc);
      auto device_name = to_utf8(desc.DeviceName);
      auto width = desc.DesktopCoordinates.right - desc.DesktopCoordinates.left;
      auto height = desc.DesktopCoordinates.bottom - desc.DesktopCoordinates.top;
      BOOST_LOG(debug)
        << "    Output Name       : "sv << device_name << std::endl
        << "    AttachedToDesktop : "sv << (desc.AttachedToDesktop ? "yes"sv : "no"sv) << std::endl
        << "    Resolution        : "sv << width << 'x' << height << std::endl
        << std::endl;
      // Don't include the display in the list if we can't actually capture it
      if (desc.AttachedToDesktop && dxgi::test_dxgi_duplication(adapter, output, true)) {
        display_names.emplace_back(std::move(device_name));
      }
    }
  }
  return display_names;
}
/**
 * @brief Returns if GPUs/drivers have changed since the last call to this function.
 * @return `true` if a change has occurred or if it is unknown whether a change occurred.
 */
bool
needs_encoder_reenumeration() {
  // Serialize access to the static DXGI factory
  static std::mutex reenumeration_state_lock;
  auto lg = std::lock_guard(reenumeration_state_lock);
  // Keep a reference to the DXGI factory, which will keep track of changes internally.
  static dxgi::factory1_t factory;
  // Fast path: a factory from a previous call that is still current means
  // nothing about the GPU/driver configuration has changed.
  if (factory && factory->IsCurrent()) {
    return false;
  }
  // Otherwise (first call, or the old factory went stale), recreate the factory
  // so the next call can compare against the new state.
  factory.reset();
  auto status = CreateDXGIFactory1(IID_IDXGIFactory1, (void **) &factory);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Failed to create DXGIFactory1 [0x"sv << util::hex(status).to_string_view() << ']';
    factory.release();
  }
  // Always request reenumeration on the first streaming session just to ensure we
  // can deal with any initialization races that may occur when the system is booting.
  BOOST_LOG(info) << "Encoder reenumeration is required"sv;
  return true;
}
} // namespace platf
| 46,304
|
C++
|
.cpp
| 1,027
| 37.925024
| 157
| 0.646477
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,015
|
input.cpp
|
LizardByte_Sunshine/src/platform/windows/input.cpp
|
/**
* @file src/platform/windows/input.cpp
* @brief Definitions for input handling on Windows.
*/
#define WINVER 0x0A00
#include <windows.h>
#include <cmath>
#include <thread>
#include <ViGEm/Client.h>
#include "keylayout.h"
#include "misc.h"
#include "src/config.h"
#include "src/globals.h"
#include "src/logging.h"
#include "src/platform/common.h"
#ifdef __MINGW32__
DECLARE_HANDLE(HSYNTHETICPOINTERDEVICE);
WINUSERAPI HSYNTHETICPOINTERDEVICE WINAPI
CreateSyntheticPointerDevice(POINTER_INPUT_TYPE pointerType, ULONG maxCount, POINTER_FEEDBACK_MODE mode);
WINUSERAPI BOOL WINAPI
InjectSyntheticPointerInput(HSYNTHETICPOINTERDEVICE device, CONST POINTER_TYPE_INFO *pointerInfo, UINT32 count);
WINUSERAPI VOID WINAPI
DestroySyntheticPointerDevice(HSYNTHETICPOINTERDEVICE device);
#endif
namespace platf {
using namespace std::literals;
// Per-thread cache of an input desktop handle. Its exact use isn't visible in
// this chunk — presumably it avoids reopening/reattaching the desktop on every
// input injection; confirm at the usage sites.
thread_local HDESK _lastKnownInputDesktop = nullptr;
// Touch coordinates are normalized over the full unsigned 16-bit range.
constexpr touch_port_t target_touch_port {
  0, 0,
  65535, 65535
};
// RAII wrappers pairing ViGEm client/target handles with their free functions.
using client_t = util::safe_ptr<_VIGEM_CLIENT_T, vigem_free>;
using target_t = util::safe_ptr<_VIGEM_TARGET_T, vigem_target_free>;
// Forward declaration: ViGEm feedback callback for emulated Xbox 360 pads
// (receives rumble motor values; the LED number parameter is unused).
void CALLBACK
x360_notify(
  client_t::pointer client,
  target_t::pointer target,
  std::uint8_t largeMotor, std::uint8_t smallMotor,
  std::uint8_t /* led_number */,
  void *userdata);
// Forward declaration: ViGEm feedback callback for emulated DS4 pads
// (receives rumble motor values; the lightbar color parameter is unused).
void CALLBACK
ds4_notify(
  client_t::pointer client,
  target_t::pointer target,
  std::uint8_t largeMotor, std::uint8_t smallMotor,
  DS4_LIGHTBAR_COLOR /* led_color */,
  void *userdata);
// A single gamepad touchpad contact: the pointer slot index it occupies plus
// its reported X/Y coordinates.
struct gp_touch_context_t {
  uint8_t pointerIndex;  // index of the touch pointer slot in use
  uint16_t x;  // touch X coordinate
  uint16_t y;  // touch Y coordinate
};
// Per-slot state for one emulated gamepad.
struct gamepad_context_t {
  target_t gp;  // ViGEm target handle for the emulated device
  feedback_queue_t feedback_queue;  // queue for posting feedback messages back to the client
  // Input report for whichever device type this target emulates
  union {
    XUSB_REPORT x360;
    DS4_REPORT_EX ds4;
  } report;
  // Map from pointer ID to pointer index
  std::map<uint32_t, uint8_t> pointer_id_map;
  // Free touch pointer slots — presumably a count or bitmask; confirm at usage sites
  uint8_t available_pointers;
  uint8_t client_relative_index;  // index of this pad from the owning client's perspective
  // Task id for repeating work scheduled on the thread pool — exact purpose
  // (e.g. report resend) not visible in this chunk
  thread_pool_util::ThreadPool::task_id_t repeat_task {};
  std::chrono::steady_clock::time_point last_report_ts;  // time the last report was submitted
  gamepad_feedback_msg_t last_rumble;  // most recent rumble feedback state
  gamepad_feedback_msg_t last_rgb_led;  // most recent RGB LED feedback state
};
// Standard gravity, used to convert accelerometer readings from m/s^2 to g.
constexpr float EARTH_G = 9.80665f;
// m/s^2 -> DS4 accelerometer units (8192 counts per g)
#define MPS2_TO_DS4_ACCEL(x) (int32_t)(((x) / EARTH_G) * 8192)
// deg/s -> DS4 gyro units; note (1024 / 64) is integer division, i.e. 16 counts per deg/s
#define DPS_TO_DS4_GYRO(x) (int32_t)((x) * (1024 / 64))
// Applies the inverse of a linear calibration: (value + bias) / scale
#define APPLY_CALIBRATION(val, bias, scale) (int32_t)(((float) (val) + (bias)) / (scale))
// DS4 touch report representing "no contact" — the 0x80 value in the tracking
// number fields appears to flag each touch point as up/inactive (per the
// bIsUp... field names; confirm against the DS4 report format).
constexpr DS4_TOUCH ds4_touch_unused = {
  .bPacketCounter = 0,
  .bIsUpTrackingNum1 = 0x80,
  .bTouchData1 = { 0x00, 0x00, 0x00 },
  .bIsUpTrackingNum2 = 0x80,
  .bTouchData2 = { 0x00, 0x00, 0x00 },
};
// See https://github.com/ViGEm/ViGEmBus/blob/22835473d17fbf0c4d4bb2f2d42fd692b6e44df4/sys/Ds4Pdo.cpp#L153-L164
// Neutral initial DS4 report: sticks centered (0x80), D-pad released, no
// buttons or triggers, zeroed motion sensors, full wired battery, and a single
// (inactive) touch packet.
constexpr DS4_REPORT_EX ds4_report_init_ex = {
  { { .bThumbLX = 0x80,
      .bThumbLY = 0x80,
      .bThumbRX = 0x80,
      .bThumbRY = 0x80,
      .wButtons = DS4_BUTTON_DPAD_NONE,
      .bSpecial = 0,
      .bTriggerL = 0,
      .bTriggerR = 0,
      .wTimestamp = 0,
      .bBatteryLvl = 0xFF,
      .wGyroX = 0,
      .wGyroY = 0,
      .wGyroZ = 0,
      .wAccelX = 0,
      .wAccelY = 0,
      .wAccelZ = 0,
      ._bUnknown1 = { 0x00, 0x00, 0x00, 0x00, 0x00 },
      .bBatteryLvlSpecial = 0x1A,  // Wired - Full battery
      ._bUnknown2 = { 0x00, 0x00 },
      .bTouchPacketsN = 1,
      .sCurrentTouch = ds4_touch_unused,
      .sPreviousTouch = { ds4_touch_unused, ds4_touch_unused } } }
};
/**
 * @brief Updates the DS4 input report with the provided motion data.
 * @details Acceleration is in m/s^2 and gyro is in deg/s.
 * @param gamepad The gamepad to update.
 * @param motion_type The type of motion data.
 * @param x X component of motion.
 * @param y Y component of motion.
 * @param z Z component of motion.
 */
static void
ds4_update_motion(gamepad_context_t &gamepad, uint8_t motion_type, float x, float y, float z) {
  auto &report = gamepad.report.ds4.Report;
  // Clamp a 32-bit intermediate into the 16-bit report field range.
  auto saturate = [](int32_t v) {
    return (int16_t) std::clamp(v, INT16_MIN, INT16_MAX);
  };
  switch (motion_type) {
    case LI_MOTION_TYPE_ACCEL:
      // Convert to the DS4's accelerometer scale, then apply the inverse of
      // ViGEmBus's calibration data, clamping to the report field range.
      report.wAccelX = saturate(APPLY_CALIBRATION(MPS2_TO_DS4_ACCEL(x), -297, 1.010796f));
      report.wAccelY = saturate(APPLY_CALIBRATION(MPS2_TO_DS4_ACCEL(y), -42, 1.014614f));
      report.wAccelZ = saturate(APPLY_CALIBRATION(MPS2_TO_DS4_ACCEL(z), -512, 1.024768f));
      break;
    case LI_MOTION_TYPE_GYRO:
      // Convert to the DS4's gyro scale, then apply the inverse of
      // ViGEmBus's calibration data, clamping to the report field range.
      report.wGyroX = saturate(APPLY_CALIBRATION(DPS_TO_DS4_GYRO(x), 1, 0.977596f));
      report.wGyroY = saturate(APPLY_CALIBRATION(DPS_TO_DS4_GYRO(y), 0, 0.972370f));
      report.wGyroZ = saturate(APPLY_CALIBRATION(DPS_TO_DS4_GYRO(z), 0, 0.971550f));
      break;
    default:
      // Unknown motion types are ignored
      break;
  }
}
/**
 * @brief Owns the connection to the ViGEmBus driver and the set of
 *        emulated gamepad targets attached through it.
 * @details The driver connection is established lazily on first gamepad
 *          attach and torn down when the last gamepad is detached.
 */
class vigem_t {
public:
  /**
   * @brief Probes the ViGEmBus driver to verify gamepad support is available.
   * @return 0 (always; a missing driver is logged, not treated as fatal here).
   */
  int
  init() {
    // Probe ViGEm during startup to see if we can successfully attach gamepads. This will allow us to
    // immediately display the error message in the web UI even before the user tries to stream.
    client_t client { vigem_alloc() };
    VIGEM_ERROR status = vigem_connect(client.get());
    if (!VIGEM_SUCCESS(status)) {
      // Log a special fatal message for this case to show the error in the web UI
      BOOST_LOG(fatal) << "ViGEmBus is not installed or running. You must install ViGEmBus for gamepad support!"sv;
    }
    else {
      // Probe succeeded; drop the temporary connection again
      vigem_disconnect(client.get());
    }

    gamepads.resize(MAX_GAMEPADS);

    return 0;
  }

  /**
   * @brief Attaches a new gamepad.
   * @param id The gamepad ID.
   * @param feedback_queue The queue for posting messages back to the client.
   * @param gp_type The type of gamepad.
   * @return 0 on success.
   */
  int
  alloc_gamepad_internal(const gamepad_id_t &id, feedback_queue_t &feedback_queue, VIGEM_TARGET_TYPE gp_type) {
    auto &gamepad = gamepads[id.globalIndex];
    assert(!gamepad.gp);

    gamepad.client_relative_index = id.clientRelativeIndex;
    gamepad.last_report_ts = std::chrono::steady_clock::now();

    // Establish a connect to the ViGEm driver if we don't have one yet
    if (!client) {
      BOOST_LOG(debug) << "Connecting to ViGEmBus driver"sv;
      client.reset(vigem_alloc());

      auto status = vigem_connect(client.get());
      if (!VIGEM_SUCCESS(status)) {
        BOOST_LOG(warning) << "Couldn't setup connection to ViGEm for gamepad support ["sv << util::hex(status).to_string_view() << ']';
        client.reset();
        return -1;
      }
    }

    if (gp_type == Xbox360Wired) {
      gamepad.gp.reset(vigem_target_x360_alloc());
      XUSB_REPORT_INIT(&gamepad.report.x360);
    }
    else {
      gamepad.gp.reset(vigem_target_ds4_alloc());

      // There is no equivalent DS4_REPORT_EX_INIT()
      gamepad.report.ds4 = ds4_report_init_ex;

      // Set initial accelerometer and gyro state
      ds4_update_motion(gamepad, LI_MOTION_TYPE_ACCEL, 0.0f, EARTH_G, 0.0f);
      ds4_update_motion(gamepad, LI_MOTION_TYPE_GYRO, 0.0f, 0.0f, 0.0f);

      // Request motion events from the client at 100 Hz
      feedback_queue->raise(gamepad_feedback_msg_t::make_motion_event_state(gamepad.client_relative_index, LI_MOTION_TYPE_ACCEL, 100));
      feedback_queue->raise(gamepad_feedback_msg_t::make_motion_event_state(gamepad.client_relative_index, LI_MOTION_TYPE_GYRO, 100));

      // We support pointer index 0 and 1
      gamepad.available_pointers = 0x3;
    }

    auto status = vigem_target_add(client.get(), gamepad.gp.get());
    if (!VIGEM_SUCCESS(status)) {
      BOOST_LOG(error) << "Couldn't add Gamepad to ViGEm connection ["sv << util::hex(status).to_string_view() << ']';
      return -1;
    }

    gamepad.feedback_queue = std::move(feedback_queue);

    // Register for force-feedback notifications from the virtual controller
    if (gp_type == Xbox360Wired) {
      status = vigem_target_x360_register_notification(client.get(), gamepad.gp.get(), x360_notify, this);
    }
    else {
      status = vigem_target_ds4_register_notification(client.get(), gamepad.gp.get(), ds4_notify, this);
    }

    // Rumble registration failure is non-fatal; the gamepad still works without it
    if (!VIGEM_SUCCESS(status)) {
      BOOST_LOG(warning) << "Couldn't register notifications for rumble support ["sv << util::hex(status).to_string_view() << ']';
    }

    return 0;
  }

  /**
   * @brief Detaches the specified gamepad
   * @param nr The gamepad.
   */
  void
  free_target(int nr) {
    auto &gamepad = gamepads[nr];

    // Cancel any pending repeat task referencing this gamepad
    if (gamepad.repeat_task) {
      task_pool.cancel(gamepad.repeat_task);
      gamepad.repeat_task = 0;
    }

    if (gamepad.gp && vigem_target_is_attached(gamepad.gp.get())) {
      auto status = vigem_target_remove(client.get(), gamepad.gp.get());
      if (!VIGEM_SUCCESS(status)) {
        BOOST_LOG(warning) << "Couldn't detach gamepad from ViGEm ["sv << util::hex(status).to_string_view() << ']';
      }
    }

    gamepad.gp.reset();

    // Disconnect from ViGEm if we just removed the last gamepad
    bool disconnect = true;
    for (auto &gamepad : gamepads) {
      if (gamepad.gp && vigem_target_is_attached(gamepad.gp.get())) {
        disconnect = false;
        break;
      }
    }
    if (disconnect) {
      BOOST_LOG(debug) << "Disconnecting from ViGEmBus driver"sv;
      vigem_disconnect(client.get());
      client.reset();
    }
  }

  /**
   * @brief Pass rumble data back to the client.
   * @param target The gamepad.
   * @param largeMotor The large motor.
   * @param smallMotor The small motor.
   */
  void
  rumble(target_t::pointer target, std::uint8_t largeMotor, std::uint8_t smallMotor) {
    // Look up which of our gamepads owns this ViGEm target
    for (int x = 0; x < gamepads.size(); ++x) {
      auto &gamepad = gamepads[x];

      if (gamepad.gp.get() == target) {
        // Convert from 8-bit to 16-bit values
        uint16_t normalizedLargeMotor = largeMotor << 8;
        uint16_t normalizedSmallMotor = smallMotor << 8;

        // Don't resend duplicate rumble data
        if (normalizedSmallMotor != gamepad.last_rumble.data.rumble.highfreq ||
            normalizedLargeMotor != gamepad.last_rumble.data.rumble.lowfreq) {
          // We have to use the client-relative index when communicating back to the client
          gamepad_feedback_msg_t msg = gamepad_feedback_msg_t::make_rumble(
            gamepad.client_relative_index, normalizedLargeMotor, normalizedSmallMotor);
          gamepad.feedback_queue->raise(msg);
          gamepad.last_rumble = msg;
        }
        return;
      }
    }
  }

  /**
   * @brief Pass RGB LED data back to the client.
   * @param target The gamepad.
   * @param r The red channel.
   * @param g The green channel.
   * @param b The blue channel.
   */
  void
  set_rgb_led(target_t::pointer target, std::uint8_t r, std::uint8_t g, std::uint8_t b) {
    // Look up which of our gamepads owns this ViGEm target
    for (int x = 0; x < gamepads.size(); ++x) {
      auto &gamepad = gamepads[x];

      if (gamepad.gp.get() == target) {
        // Don't resend duplicate RGB data
        if (r != gamepad.last_rgb_led.data.rgb_led.r ||
            g != gamepad.last_rgb_led.data.rgb_led.g ||
            b != gamepad.last_rgb_led.data.rgb_led.b) {
          // We have to use the client-relative index when communicating back to the client
          gamepad_feedback_msg_t msg = gamepad_feedback_msg_t::make_rgb_led(gamepad.client_relative_index, r, g, b);
          gamepad.feedback_queue->raise(msg);
          gamepad.last_rgb_led = msg;
        }
        return;
      }
    }
  }

  /**
   * @brief vigem_t destructor. Detaches any remaining gamepads and
   *        disconnects from the driver.
   */
  ~vigem_t() {
    if (client) {
      for (auto &gamepad : gamepads) {
        if (gamepad.gp && vigem_target_is_attached(gamepad.gp.get())) {
          auto status = vigem_target_remove(client.get(), gamepad.gp.get());
          if (!VIGEM_SUCCESS(status)) {
            BOOST_LOG(warning) << "Couldn't detach gamepad from ViGEm ["sv << util::hex(status).to_string_view() << ']';
          }
        }
      }

      vigem_disconnect(client.get());
    }
  }

  // One slot per possible gamepad; a slot is active when its `gp` target is set
  std::vector<gamepad_context_t> gamepads;

  // Connection to the ViGEmBus driver; null when no gamepads are attached
  client_t client;
};
/**
 * @brief ViGEm force-feedback callback for emulated Xbox 360 controllers.
 * @details Invoked on a ViGEm thread; the actual client notification is
 *          deferred to the task pool.
 */
void CALLBACK
x360_notify(
  client_t::pointer client,
  target_t::pointer target,
  std::uint8_t largeMotor, std::uint8_t smallMotor,
  std::uint8_t /* led_number */,
  void *userdata) {
  BOOST_LOG(debug)
    << "largeMotor: "sv << (int) largeMotor << std::endl
    << "smallMotor: "sv << (int) smallMotor;

  // Hand off to the task pool so we don't block the ViGEm callback thread
  auto self = static_cast<vigem_t *>(userdata);
  task_pool.push(&vigem_t::rumble, self, target, largeMotor, smallMotor);
}
/**
 * @brief ViGEm feedback callback for emulated DualShock 4 controllers.
 * @details Forwards both rumble and lightbar color changes back to the
 *          client via the task pool.
 */
void CALLBACK
ds4_notify(
  client_t::pointer client,
  target_t::pointer target,
  std::uint8_t largeMotor, std::uint8_t smallMotor,
  DS4_LIGHTBAR_COLOR led_color,
  void *userdata) {
  BOOST_LOG(debug)
    << "largeMotor: "sv << (int) largeMotor << std::endl
    << "smallMotor: "sv << (int) smallMotor << std::endl
    << "LED: "sv << util::hex(led_color.Red).to_string_view() << ' '
    << util::hex(led_color.Green).to_string_view() << ' '
    << util::hex(led_color.Blue).to_string_view() << std::endl;

  // Hand off to the task pool so we don't block the ViGEm callback thread
  auto self = static_cast<vigem_t *>(userdata);
  task_pool.push(&vigem_t::rumble, self, target, largeMotor, smallMotor);
  task_pool.push(&vigem_t::set_rgb_led, self, target, led_color.Red, led_color.Green, led_color.Blue);
}
/**
 * @brief Global input context, owning the ViGEm gamepad backend and the
 *        dynamically-resolved synthetic pointer entry points.
 */
struct input_raw_t {
  input_raw_t() = default;

  // Non-copyable: this struct uniquely owns the vigem_t instance, so an
  // implicit copy would lead to a double-delete in the destructor.
  input_raw_t(const input_raw_t &) = delete;
  input_raw_t &
  operator=(const input_raw_t &) = delete;

  ~input_raw_t() {
    delete vigem;
  }

  // Owning pointer; nullptr when ViGEmBus is unavailable (gamepad support disabled)
  vigem_t *vigem = nullptr;

  // Synthetic pointer functions resolved at runtime (Win10 1809+); nullptr on older systems
  decltype(CreateSyntheticPointerDevice) *fnCreateSyntheticPointerDevice = nullptr;
  decltype(InjectSyntheticPointerInput) *fnInjectSyntheticPointerInput = nullptr;
  decltype(DestroySyntheticPointerDevice) *fnDestroySyntheticPointerDevice = nullptr;
};
/**
 * @brief Creates the global input context.
 * @return The allocated input context.
 */
input_t
input() {
  input_t result { new input_raw_t {} };
  auto &raw = *(input_raw_t *) result.get();

  raw.vigem = new vigem_t {};
  if (raw.vigem->init()) {
    // ViGEm probing failed; disable gamepad support
    delete raw.vigem;
    raw.vigem = nullptr;
  }

  // Get pointers to virtual touch/pen input functions (Win10 1809+).
  // Look up the module handle once instead of once per symbol.
  auto user32 = GetModuleHandleA("user32.dll");
  raw.fnCreateSyntheticPointerDevice = (decltype(CreateSyntheticPointerDevice) *) GetProcAddress(user32, "CreateSyntheticPointerDevice");
  raw.fnInjectSyntheticPointerInput = (decltype(InjectSyntheticPointerInput) *) GetProcAddress(user32, "InjectSyntheticPointerInput");
  raw.fnDestroySyntheticPointerDevice = (decltype(DestroySyntheticPointerDevice) *) GetProcAddress(user32, "DestroySyntheticPointerDevice");

  return result;
}
/**
 * @brief Calls SendInput() and switches input desktops if required.
 * @param i The `INPUT` struct to send.
 */
void
send_input(INPUT &i) {
  while (SendInput(1, &i, sizeof(INPUT)) != 1) {
    // Injection can fail when the input desktop changed (e.g. secure desktop).
    // If we detect a desktop switch, attach to the new desktop and retry.
    auto hDesk = syncThreadDesktop();
    if (_lastKnownInputDesktop == hDesk) {
      // Same desktop as before — the failure is genuine, so give up
      BOOST_LOG(error) << "Couldn't send input"sv;
      break;
    }
    _lastKnownInputDesktop = hDesk;
  }
}
/**
 * @brief Calls InjectSyntheticPointerInput() and switches input desktops if required.
 * @details Must only be called if InjectSyntheticPointerInput() is available.
 * @param input The global input context.
 * @param device The synthetic pointer device handle.
 * @param pointerInfo An array of `POINTER_TYPE_INFO` structs.
 * @param count The number of elements in `pointerInfo`.
 * @return true if input was successfully injected.
 */
bool
inject_synthetic_pointer_input(input_raw_t *input, HSYNTHETICPOINTERDEVICE device, const POINTER_TYPE_INFO *pointerInfo, UINT32 count) {
  while (!input->fnInjectSyntheticPointerInput(device, pointerInfo, count)) {
    // Injection can fail when the input desktop changed. If we detect a
    // desktop switch, attach to the new desktop and retry; otherwise fail.
    auto hDesk = syncThreadDesktop();
    if (_lastKnownInputDesktop == hDesk) {
      return false;
    }
    _lastKnownInputDesktop = hDesk;
  }
  return true;
}
/**
 * @brief Moves the mouse to an absolute position on the virtual desktop.
 * @param input The global input context.
 * @param touch_port The client's viewport for coordinate translation.
 * @param x The client-space X coordinate.
 * @param y The client-space Y coordinate.
 */
void
abs_mouse(input_t &input, const touch_port_t &touch_port, float x, float y) {
  INPUT event {};
  event.type = INPUT_MOUSE;

  auto &mouse = event.mi;
  mouse.dwFlags =
    MOUSEEVENTF_MOVE |
    MOUSEEVENTF_ABSOLUTE |

    // MOUSEEVENTF_VIRTUALDESK maps to the entirety of the desktop rather than the primary desktop
    MOUSEEVENTF_VIRTUALDESK;

  // Scale from the client's viewport into the host's target viewport
  mouse.dx = std::lround((x + touch_port.offset_x) * ((float) target_touch_port.width / (float) touch_port.width));
  mouse.dy = std::lround((y + touch_port.offset_y) * ((float) target_touch_port.height / (float) touch_port.height));

  send_input(event);
}
/**
 * @brief Moves the mouse by a relative delta.
 * @param input The global input context.
 * @param deltaX Horizontal movement in mickeys.
 * @param deltaY Vertical movement in mickeys.
 */
void
move_mouse(input_t &input, int deltaX, int deltaY) {
  INPUT event {};
  event.type = INPUT_MOUSE;
  event.mi.dwFlags = MOUSEEVENTF_MOVE;
  event.mi.dx = deltaX;
  event.mi.dy = deltaY;

  send_input(event);
}
/**
 * @brief Returns the current cursor position.
 * @note Currently a stub: the function unconditionally throws, so the
 *       GetCursorPos() path below is unreachable dead code.
 */
util::point_t
get_mouse_loc(input_t &input) {
  throw std::runtime_error("not implemented yet, has to pass tests");
  // TODO: Tests are failing, something wrong here?
  POINT p;
  if (!GetCursorPos(&p)) {
    // Query failed; report the origin
    return util::point_t { 0.0, 0.0 };
  }

  return util::point_t {
    (double) p.x,
    (double) p.y
  };
}
/**
 * @brief Presses or releases a mouse button.
 * @param input The global input context.
 * @param button Button number: 1=left, 2=middle, 3=right, 4=X1, otherwise X2.
 * @param release true to release the button, false to press it.
 */
void
button_mouse(input_t &input, int button, bool release) {
  INPUT event {};
  event.type = INPUT_MOUSE;

  auto &mouse = event.mi;
  switch (button) {
    case 1:
      mouse.dwFlags = release ? MOUSEEVENTF_LEFTUP : MOUSEEVENTF_LEFTDOWN;
      break;
    case 2:
      mouse.dwFlags = release ? MOUSEEVENTF_MIDDLEUP : MOUSEEVENTF_MIDDLEDOWN;
      break;
    case 3:
      mouse.dwFlags = release ? MOUSEEVENTF_RIGHTUP : MOUSEEVENTF_RIGHTDOWN;
      break;
    case 4:
      mouse.dwFlags = release ? MOUSEEVENTF_XUP : MOUSEEVENTF_XDOWN;
      mouse.mouseData = XBUTTON1;
      break;
    default:
      // Any other button maps to the second extended button
      mouse.dwFlags = release ? MOUSEEVENTF_XUP : MOUSEEVENTF_XDOWN;
      mouse.mouseData = XBUTTON2;
      break;
  }

  send_input(event);
}
/**
 * @brief Scrolls the mouse wheel vertically.
 * @param input The global input context.
 * @param distance The scroll distance in wheel delta units.
 */
void
scroll(input_t &input, int distance) {
  INPUT event {};
  event.type = INPUT_MOUSE;
  event.mi.dwFlags = MOUSEEVENTF_WHEEL;
  event.mi.mouseData = distance;

  send_input(event);
}
/**
 * @brief Scrolls the mouse wheel horizontally.
 * @param input The global input context.
 * @param distance The scroll distance in wheel delta units.
 */
void
hscroll(input_t &input, int distance) {
  INPUT event {};
  event.type = INPUT_MOUSE;
  event.mi.dwFlags = MOUSEEVENTF_HWHEEL;
  event.mi.mouseData = distance;

  send_input(event);
}
/**
 * @brief Sends a keyboard key event to the OS.
 * @param input The global input context.
 * @param modcode The Windows virtual key code from the client.
 * @param release true for a key-up event, false for key-down.
 * @param flags SS_KBE_FLAG_* modifiers from the client.
 */
void
keyboard_update(input_t &input, uint16_t modcode, bool release, uint8_t flags) {
  INPUT i {};

  i.type = INPUT_KEYBOARD;
  auto &ki = i.ki;

  // If the client did not normalize this VK code to a US English layout, we can't accurately convert it to a scancode.
  // If we're set to always send scancodes, we will use the current keyboard layout to convert to a scancode. This will
  // assume the client and host have the same keyboard layout, but it's probably better than always using US English.
  if (!(flags & SS_KBE_FLAG_NON_NORMALIZED)) {
    // Mask off the extended key byte
    ki.wScan = VK_TO_SCANCODE_MAP[modcode & 0xFF];
  }
  else if (config::input.always_send_scancodes && modcode != VK_LWIN && modcode != VK_RWIN && modcode != VK_PAUSE) {
    // For some reason, MapVirtualKey(VK_LWIN, MAPVK_VK_TO_VSC) doesn't seem to work :/
    ki.wScan = MapVirtualKey(modcode, MAPVK_VK_TO_VSC);
  }

  // If we can map this to a scancode, send it as a scancode for maximum game compatibility.
  if (ki.wScan) {
    ki.dwFlags = KEYEVENTF_SCANCODE;
  }
  else {
    // If there is no scancode mapping or it's non-normalized, send it as a regular VK event.
    ki.wVk = modcode;
  }

  // These keys need the extended key flag so Windows distinguishes them from
  // their numpad/left-hand counterparts that share the same base scancode.
  // https://docs.microsoft.com/en-us/windows/win32/inputdev/about-keyboard-input#keystroke-message-flags
  switch (modcode) {
    case VK_LWIN:
    case VK_RWIN:
    case VK_RMENU:
    case VK_RCONTROL:
    case VK_INSERT:
    case VK_DELETE:
    case VK_HOME:
    case VK_END:
    case VK_PRIOR:
    case VK_NEXT:
    case VK_UP:
    case VK_DOWN:
    case VK_LEFT:
    case VK_RIGHT:
    case VK_DIVIDE:
    case VK_APPS:
      ki.dwFlags |= KEYEVENTF_EXTENDEDKEY;
      break;
    default:
      break;
  }

  if (release) {
    ki.dwFlags |= KEYEVENTF_KEYUP;
  }

  send_input(i);
}
/**
 * @brief Per-client input context holding synthetic pen/touch device state.
 */
struct client_input_raw_t: public client_input_t {
  // explicit: this is a context wrapper, not an implicit conversion from input_t
  explicit client_input_raw_t(input_t &input) {
    global = (input_raw_t *) input.get();
  }

  ~client_input_raw_t() override {
    // Cancel any pending refresh tasks before destroying the devices they reference
    if (penRepeatTask) {
      task_pool.cancel(penRepeatTask);
    }
    if (touchRepeatTask) {
      task_pool.cancel(touchRepeatTask);
    }

    if (pen) {
      global->fnDestroySyntheticPointerDevice(pen);
    }
    if (touch) {
      global->fnDestroySyntheticPointerDevice(touch);
    }
  }

  // Non-owning pointer to the global input context
  input_raw_t *global;

  // Device state and handles for pen and touch input must be stored in the per-client
  // input context, because each connected client may be sending their own independent
  // pen/touch events. To maintain separation, we expose separate pen and touch devices
  // for each client.
  HSYNTHETICPOINTERDEVICE pen {};
  POINTER_TYPE_INFO penInfo {};
  thread_pool_util::ThreadPool::task_id_t penRepeatTask {};

  HSYNTHETICPOINTERDEVICE touch {};
  POINTER_TYPE_INFO touchInfo[10] {};
  UINT32 activeTouchSlots {};
  thread_pool_util::ThreadPool::task_id_t touchRepeatTask {};
};
/**
 * @brief Allocates a context to store per-client input data.
 * @param input The global input context.
 * @return A unique pointer to a per-client input data context.
 */
std::unique_ptr<client_input_t>
allocate_client_input_context(input_t &input) {
  // The derived pointer converts implicitly to the base interface type
  auto context = std::make_unique<client_input_raw_t>(input);
  return context;
}
/**
 * @brief Compacts the touch slots into a contiguous block and updates the active count.
 * @details Since this swaps entries around, all slot pointers/references are invalid after compaction.
 * @param raw The client-specific input context.
 */
void
perform_touch_compaction(client_input_raw_t *raw) {
  // Windows requires all active touches be contiguous when fed into InjectSyntheticPointerInput().
  // A slot with pointerFlags == POINTER_FLAG_NONE is considered empty/inactive.
  UINT32 i;
  for (i = 0; i < ARRAYSIZE(raw->touchInfo); i++) {
    if (raw->touchInfo[i].touchInfo.pointerInfo.pointerFlags == POINTER_FLAG_NONE) {
      // This is an empty slot. Look for a later entry to move into this slot.
      for (UINT32 j = i + 1; j < ARRAYSIZE(raw->touchInfo); j++) {
        if (raw->touchInfo[j].touchInfo.pointerInfo.pointerFlags != POINTER_FLAG_NONE) {
          std::swap(raw->touchInfo[i], raw->touchInfo[j]);
          break;
        }
      }

      // If we didn't find anything, we've reached the end of active slots.
      if (raw->touchInfo[i].touchInfo.pointerInfo.pointerFlags == POINTER_FLAG_NONE) {
        break;
      }
    }
  }

  // Update the number of active touch slots
  raw->activeTouchSlots = i;
}
/**
 * @brief Gets a pointer slot by client-relative pointer ID, claiming a new one if necessary.
 * @param raw The raw client-specific input context.
 * @param pointerId The client's pointer ID.
 * @param eventType The LI_TOUCH_EVENT value from the client.
 * @return A pointer to the slot entry, or nullptr if all slots are in use.
 */
POINTER_TYPE_INFO *
pointer_by_id(client_input_raw_t *raw, uint32_t pointerId, uint8_t eventType) {
  // Compact active touches into a single contiguous block
  perform_touch_compaction(raw);

  // Try to find a matching pointer ID among the active slots
  for (UINT32 i = 0; i < ARRAYSIZE(raw->touchInfo); i++) {
    if (raw->touchInfo[i].touchInfo.pointerInfo.pointerId == pointerId &&
        raw->touchInfo[i].touchInfo.pointerInfo.pointerFlags != POINTER_FLAG_NONE) {
      // A second "down" for an already-contacting pointer suggests a lost up/cancel
      if (eventType == LI_TOUCH_EVENT_DOWN && (raw->touchInfo[i].touchInfo.pointerInfo.pointerFlags & POINTER_FLAG_INCONTACT)) {
        BOOST_LOG(warning) << "Pointer "sv << pointerId << " already down. Did the client drop an up/cancel event?"sv;
      }

      return &raw->touchInfo[i];
    }
  }

  // Only hover/down events are expected to introduce a brand-new pointer ID
  if (eventType != LI_TOUCH_EVENT_HOVER && eventType != LI_TOUCH_EVENT_DOWN) {
    BOOST_LOG(warning) << "Unexpected new pointer "sv << pointerId << " for event "sv << (uint32_t) eventType << ". Did the client drop a down/hover event?"sv;
  }

  // If there was none, grab an unused entry and increment the active slot count
  for (UINT32 i = 0; i < ARRAYSIZE(raw->touchInfo); i++) {
    if (raw->touchInfo[i].touchInfo.pointerInfo.pointerFlags == POINTER_FLAG_NONE) {
      raw->touchInfo[i].touchInfo.pointerInfo.pointerId = pointerId;
      raw->activeTouchSlots = i + 1;
      return &raw->touchInfo[i];
    }
  }

  // All slots are occupied
  return nullptr;
}
/**
 * @brief Populate common `POINTER_INFO` members shared between pen and touch events.
 * @param pointerInfo The pointer info to populate.
 * @param touchPort The current viewport for translating to screen coordinates.
 * @param eventType The type of touch/pen event.
 * @param x The normalized 0.0-1.0 X coordinate.
 * @param y The normalized 0.0-1.0 Y coordinate.
 */
void
populate_common_pointer_info(POINTER_INFO &pointerInfo, const touch_port_t &touchPort, uint8_t eventType, float x, float y) {
  switch (eventType) {
    case LI_TOUCH_EVENT_HOVER:
      // In range but not touching the surface
      pointerInfo.pointerFlags &= ~POINTER_FLAG_INCONTACT;
      pointerInfo.pointerFlags |= POINTER_FLAG_INRANGE | POINTER_FLAG_UPDATE;
      pointerInfo.ptPixelLocation.x = x * touchPort.width + touchPort.offset_x;
      pointerInfo.ptPixelLocation.y = y * touchPort.height + touchPort.offset_y;
      break;
    case LI_TOUCH_EVENT_DOWN:
      // Initial contact with the surface
      pointerInfo.pointerFlags |= POINTER_FLAG_INRANGE | POINTER_FLAG_INCONTACT | POINTER_FLAG_DOWN;
      pointerInfo.ptPixelLocation.x = x * touchPort.width + touchPort.offset_x;
      pointerInfo.ptPixelLocation.y = y * touchPort.height + touchPort.offset_y;
      break;
    case LI_TOUCH_EVENT_UP:
      // We expect to get another LI_TOUCH_EVENT_HOVER if the pointer remains in range
      pointerInfo.pointerFlags &= ~(POINTER_FLAG_INCONTACT | POINTER_FLAG_INRANGE);
      pointerInfo.pointerFlags |= POINTER_FLAG_UP;
      break;
    case LI_TOUCH_EVENT_MOVE:
      // Movement while in contact with the surface
      pointerInfo.pointerFlags |= POINTER_FLAG_INRANGE | POINTER_FLAG_INCONTACT | POINTER_FLAG_UPDATE;
      pointerInfo.ptPixelLocation.x = x * touchPort.width + touchPort.offset_x;
      pointerInfo.ptPixelLocation.y = y * touchPort.height + touchPort.offset_y;
      break;
    case LI_TOUCH_EVENT_CANCEL:
    case LI_TOUCH_EVENT_CANCEL_ALL:
      // If we were in contact with the touch surface at the time of the cancellation,
      // we'll set POINTER_FLAG_UP, otherwise set POINTER_FLAG_UPDATE.
      if (pointerInfo.pointerFlags & POINTER_FLAG_INCONTACT) {
        pointerInfo.pointerFlags |= POINTER_FLAG_UP;
      }
      else {
        pointerInfo.pointerFlags |= POINTER_FLAG_UPDATE;
      }

      pointerInfo.pointerFlags &= ~(POINTER_FLAG_INCONTACT | POINTER_FLAG_INRANGE);
      pointerInfo.pointerFlags |= POINTER_FLAG_CANCELED;
      break;
    case LI_TOUCH_EVENT_HOVER_LEAVE:
      // Pointer left hover range without contacting the surface
      pointerInfo.pointerFlags &= ~(POINTER_FLAG_INCONTACT | POINTER_FLAG_INRANGE);
      pointerInfo.pointerFlags |= POINTER_FLAG_UPDATE;
      break;
    case LI_TOUCH_EVENT_BUTTON_ONLY:
      // On Windows, we can only pass buttons if we have an active pointer
      if (pointerInfo.pointerFlags != POINTER_FLAG_NONE) {
        pointerInfo.pointerFlags |= POINTER_FLAG_UPDATE;
      }
      break;
    default:
      BOOST_LOG(warning) << "Unknown touch event: "sv << (uint32_t) eventType;
      break;
  }
}
// Active pointer interactions sent via InjectSyntheticPointerInput() seem to be automatically
// cancelled by Windows if not repeated/updated within about a second. To avoid this, refresh
// the injected input periodically (well under the observed ~1s timeout).
constexpr auto ISPI_REPEAT_INTERVAL = 50ms;
/**
 * @brief Repeats the current touch state to avoid the interactions timing out.
 * @param raw The raw client-specific input context.
 */
void
repeat_touch(client_input_raw_t *raw) {
  auto injected = inject_synthetic_pointer_input(raw->global, raw->touch, raw->touchInfo, raw->activeTouchSlots);
  if (!injected) {
    auto err = GetLastError();
    BOOST_LOG(warning) << "Failed to refresh virtual touch input: "sv << err;
  }

  // Reschedule ourselves to keep the interaction alive
  raw->touchRepeatTask = task_pool.pushDelayed(repeat_touch, ISPI_REPEAT_INTERVAL, raw).task_id;
}
/**
 * @brief Repeats the current pen state to avoid the interactions timing out.
 * @param raw The raw client-specific input context.
 */
void
repeat_pen(client_input_raw_t *raw) {
  auto injected = inject_synthetic_pointer_input(raw->global, raw->pen, &raw->penInfo, 1);
  if (!injected) {
    auto err = GetLastError();
    BOOST_LOG(warning) << "Failed to refresh virtual pen input: "sv << err;
  }

  // Reschedule ourselves to keep the interaction alive
  raw->penRepeatTask = task_pool.pushDelayed(repeat_pen, ISPI_REPEAT_INTERVAL, raw).task_id;
}
/**
 * @brief Cancels all active touches.
 * @param raw The raw client-specific input context.
 */
void
cancel_all_active_touches(client_input_raw_t *raw) {
  // Cancel touch repeat callbacks
  if (raw->touchRepeatTask) {
    task_pool.cancel(raw->touchRepeatTask);
    raw->touchRepeatTask = nullptr;
  }

  // Compact touches to update activeTouchSlots
  perform_touch_compaction(raw);

  // If we have active slots, cancel them all
  if (raw->activeTouchSlots > 0) {
    for (UINT32 i = 0; i < raw->activeTouchSlots; i++) {
      // Coordinates don't matter for cancellation, so an empty touch port is fine
      populate_common_pointer_info(raw->touchInfo[i].touchInfo.pointerInfo, {}, LI_TOUCH_EVENT_CANCEL_ALL, 0.0f, 0.0f);
      raw->touchInfo[i].touchInfo.touchMask = TOUCH_MASK_NONE;
    }
    if (!inject_synthetic_pointer_input(raw->global, raw->touch, raw->touchInfo, raw->activeTouchSlots)) {
      auto err = GetLastError();
      BOOST_LOG(warning) << "Failed to cancel all virtual touch input: "sv << err;
    }
  }

  // Zero all touch state
  std::memset(raw->touchInfo, 0, sizeof(raw->touchInfo));
  raw->activeTouchSlots = 0;
}
// These are edge-triggered pointer state flags that should always be cleared next frame
// (they describe a transition, not a persistent state)
constexpr auto EDGE_TRIGGERED_POINTER_FLAGS = POINTER_FLAG_DOWN | POINTER_FLAG_UP | POINTER_FLAG_CANCELED | POINTER_FLAG_UPDATE;
/**
 * @brief Sends a touch event to the OS.
 * @param input The client-specific input context.
 * @param touch_port The current viewport for translating to screen coordinates.
 * @param touch The touch event.
 */
void
touch_update(client_input_t *input, const touch_port_t &touch_port, const touch_input_t &touch) {
  auto raw = (client_input_raw_t *) input;

  // Bail if we're not running on an OS that supports virtual touch input
  if (!raw->global->fnCreateSyntheticPointerDevice ||
      !raw->global->fnInjectSyntheticPointerInput ||
      !raw->global->fnDestroySyntheticPointerDevice) {
    BOOST_LOG(warning) << "Touch input requires Windows 10 1809 or later"sv;
    return;
  }

  // If there's not already a virtual touch device, create one now
  if (!raw->touch) {
    if (touch.eventType != LI_TOUCH_EVENT_CANCEL_ALL) {
      BOOST_LOG(info) << "Creating virtual touch input device"sv;
      raw->touch = raw->global->fnCreateSyntheticPointerDevice(PT_TOUCH, ARRAYSIZE(raw->touchInfo), POINTER_FEEDBACK_DEFAULT);
      if (!raw->touch) {
        auto err = GetLastError();
        BOOST_LOG(warning) << "Failed to create virtual touch device: "sv << err;
        return;
      }
    }
    else {
      // No need to cancel anything if we had no touch input device
      return;
    }
  }

  // Cancel touch repeat callbacks; we'll reschedule one at the end if still needed
  if (raw->touchRepeatTask) {
    task_pool.cancel(raw->touchRepeatTask);
    raw->touchRepeatTask = nullptr;
  }

  // If this is a special request to cancel all touches, do that and return
  if (touch.eventType == LI_TOUCH_EVENT_CANCEL_ALL) {
    cancel_all_active_touches(raw);
    return;
  }

  // Find or allocate an entry for this touch pointer ID
  auto pointer = pointer_by_id(raw, touch.pointerId, touch.eventType);
  if (!pointer) {
    // All slots taken; cancelling frees every slot, so the retry must succeed
    BOOST_LOG(error) << "No unused pointer entries! Cancelling all active touches!"sv;
    cancel_all_active_touches(raw);
    pointer = pointer_by_id(raw, touch.pointerId, touch.eventType);
  }

  pointer->type = PT_TOUCH;

  auto &touchInfo = pointer->touchInfo;
  touchInfo.pointerInfo.pointerType = PT_TOUCH;

  // Populate shared pointer info fields
  populate_common_pointer_info(touchInfo.pointerInfo, touch_port, touch.eventType, touch.x, touch.y);

  touchInfo.touchMask = TOUCH_MASK_NONE;

  // Pressure and contact area only apply to in-contact pointers.
  //
  // The clients also pass distance and tool size for hovers, but Windows doesn't
  // provide APIs to receive that data.
  if (touchInfo.pointerInfo.pointerFlags & POINTER_FLAG_INCONTACT) {
    if (touch.pressureOrDistance != 0.0f) {
      touchInfo.touchMask |= TOUCH_MASK_PRESSURE;

      // Convert the 0.0f..1.0f float to the 0..1024 range that Windows uses
      touchInfo.pressure = (UINT32) (touch.pressureOrDistance * 1024);
    }
    else {
      // The default touch pressure is 512
      touchInfo.pressure = 512;
    }

    if (touch.contactAreaMajor != 0.0f && touch.contactAreaMinor != 0.0f) {
      // For the purposes of contact area calculation, we will assume the touches
      // are at a 45 degree angle if rotation is unknown. This will scale the major
      // axis value by width and height equally.
      float rotationAngleDegs = touch.rotation == LI_ROT_UNKNOWN ? 45 : touch.rotation;
      float majorAxisAngle = rotationAngleDegs * (M_PI / 180);
      float minorAxisAngle = majorAxisAngle + (M_PI / 2);

      // Estimate the contact rectangle
      float contactWidth = (std::cos(majorAxisAngle) * touch.contactAreaMajor) + (std::cos(minorAxisAngle) * touch.contactAreaMinor);
      float contactHeight = (std::sin(majorAxisAngle) * touch.contactAreaMajor) + (std::sin(minorAxisAngle) * touch.contactAreaMinor);

      // Convert into screen coordinates centered at the touch location and constrained by screen dimensions
      touchInfo.rcContact.left = std::max<LONG>(touch_port.offset_x, touchInfo.pointerInfo.ptPixelLocation.x - std::floor(contactWidth / 2));
      touchInfo.rcContact.right = std::min<LONG>(touch_port.offset_x + touch_port.width, touchInfo.pointerInfo.ptPixelLocation.x + std::ceil(contactWidth / 2));
      touchInfo.rcContact.top = std::max<LONG>(touch_port.offset_y, touchInfo.pointerInfo.ptPixelLocation.y - std::floor(contactHeight / 2));
      touchInfo.rcContact.bottom = std::min<LONG>(touch_port.offset_y + touch_port.height, touchInfo.pointerInfo.ptPixelLocation.y + std::ceil(contactHeight / 2));
      touchInfo.touchMask |= TOUCH_MASK_CONTACTAREA;
    }
  }
  else {
    // Not in contact: no pressure or contact rectangle to report
    touchInfo.pressure = 0;
    touchInfo.rcContact = {};
  }

  if (touch.rotation != LI_ROT_UNKNOWN) {
    touchInfo.touchMask |= TOUCH_MASK_ORIENTATION;
    touchInfo.orientation = touch.rotation;
  }
  else {
    touchInfo.orientation = 0;
  }

  if (!inject_synthetic_pointer_input(raw->global, raw->touch, raw->touchInfo, raw->activeTouchSlots)) {
    auto err = GetLastError();
    BOOST_LOG(warning) << "Failed to inject virtual touch input: "sv << err;
    return;
  }

  // Clear pointer flags that should only remain set for one frame
  touchInfo.pointerInfo.pointerFlags &= ~EDGE_TRIGGERED_POINTER_FLAGS;

  // If we still have an active touch, refresh the touch state periodically
  if (raw->activeTouchSlots > 1 || touchInfo.pointerInfo.pointerFlags != POINTER_FLAG_NONE) {
    raw->touchRepeatTask = task_pool.pushDelayed(repeat_touch, ISPI_REPEAT_INTERVAL, raw).task_id;
  }
}
/**
 * @brief Sends a pen event to the OS.
 * @param input The client-specific input context.
 * @param touch_port The current viewport for translating to screen coordinates.
 * @param pen The pen event.
 */
void
pen_update(client_input_t *input, const touch_port_t &touch_port, const pen_input_t &pen) {
  auto raw = (client_input_raw_t *) input;

  // Bail if we're not running on an OS that supports virtual pen input
  if (!raw->global->fnCreateSyntheticPointerDevice ||
      !raw->global->fnInjectSyntheticPointerInput ||
      !raw->global->fnDestroySyntheticPointerDevice) {
    BOOST_LOG(warning) << "Pen input requires Windows 10 1809 or later"sv;
    return;
  }

  // If there's not already a virtual pen device, create one now
  if (!raw->pen) {
    if (pen.eventType != LI_TOUCH_EVENT_CANCEL_ALL) {
      BOOST_LOG(info) << "Creating virtual pen input device"sv;
      raw->pen = raw->global->fnCreateSyntheticPointerDevice(PT_PEN, 1, POINTER_FEEDBACK_DEFAULT);
      if (!raw->pen) {
        auto err = GetLastError();
        BOOST_LOG(warning) << "Failed to create virtual pen device: "sv << err;
        return;
      }
    }
    else {
      // No need to cancel anything if we had no pen input device
      return;
    }
  }

  // Cancel pen repeat callbacks; we'll reschedule one at the end if still needed
  if (raw->penRepeatTask) {
    task_pool.cancel(raw->penRepeatTask);
    raw->penRepeatTask = nullptr;
  }

  raw->penInfo.type = PT_PEN;

  auto &penInfo = raw->penInfo.penInfo;
  penInfo.pointerInfo.pointerType = PT_PEN;
  // There is only one pen device, so the pointer ID is fixed
  penInfo.pointerInfo.pointerId = 0;

  // Populate shared pointer info fields
  populate_common_pointer_info(penInfo.pointerInfo, touch_port, pen.eventType, pen.x, pen.y);

  // Windows only supports a single pen button, so send all buttons as the barrel button
  if (pen.penButtons) {
    penInfo.penFlags |= PEN_FLAG_BARREL;
  }
  else {
    penInfo.penFlags &= ~PEN_FLAG_BARREL;
  }

  switch (pen.toolType) {
    default:
    case LI_TOOL_TYPE_PEN:
      penInfo.penFlags &= ~PEN_FLAG_ERASER;
      break;
    case LI_TOOL_TYPE_ERASER:
      penInfo.penFlags |= PEN_FLAG_ERASER;
      break;
    case LI_TOOL_TYPE_UNKNOWN:
      // Leave tool flags alone
      break;
  }

  penInfo.penMask = PEN_MASK_NONE;

  // Windows doesn't support hover distance, so only pass pressure/distance when the pointer is in contact
  if ((penInfo.pointerInfo.pointerFlags & POINTER_FLAG_INCONTACT) && pen.pressureOrDistance != 0.0f) {
    penInfo.penMask |= PEN_MASK_PRESSURE;

    // Convert the 0.0f..1.0f float to the 0..1024 range that Windows uses
    penInfo.pressure = (UINT32) (pen.pressureOrDistance * 1024);
  }
  else {
    // The default pen pressure is 0
    penInfo.pressure = 0;
  }

  if (pen.rotation != LI_ROT_UNKNOWN) {
    penInfo.penMask |= PEN_MASK_ROTATION;
    penInfo.rotation = pen.rotation;
  }
  else {
    penInfo.rotation = 0;
  }

  // We require rotation and tilt to perform the conversion to X and Y tilt angles
  if (pen.tilt != LI_TILT_UNKNOWN && pen.rotation != LI_ROT_UNKNOWN) {
    auto rotationRads = pen.rotation * (M_PI / 180.f);
    auto tiltRads = pen.tilt * (M_PI / 180.f);
    auto r = std::sin(tiltRads);
    auto z = std::cos(tiltRads);

    // Convert polar coordinates into X and Y tilt angles
    penInfo.penMask |= PEN_MASK_TILT_X | PEN_MASK_TILT_Y;
    penInfo.tiltX = (INT32) (std::atan2(std::sin(-rotationRads) * r, z) * 180.f / M_PI);
    penInfo.tiltY = (INT32) (std::atan2(std::cos(-rotationRads) * r, z) * 180.f / M_PI);
  }
  else {
    penInfo.tiltX = 0;
    penInfo.tiltY = 0;
  }

  if (!inject_synthetic_pointer_input(raw->global, raw->pen, &raw->penInfo, 1)) {
    auto err = GetLastError();
    BOOST_LOG(warning) << "Failed to inject virtual pen input: "sv << err;
    return;
  }

  // Clear pointer flags that should only remain set for one frame
  penInfo.pointerInfo.pointerFlags &= ~EDGE_TRIGGERED_POINTER_FLAGS;

  // If we still have an active pen interaction, refresh the pen state periodically
  if (penInfo.pointerInfo.pointerFlags != POINTER_FLAG_NONE) {
    raw->penRepeatTask = task_pool.pushDelayed(repeat_pen, ISPI_REPEAT_INTERVAL, raw).task_id;
  }
}
/**
 * @brief Sends UTF-8 text to the OS as simulated Unicode keystrokes.
 * @param input The global input context.
 * @param utf8 Pointer to the UTF-8 encoded text (not null-terminated).
 * @param size Size of the text in bytes.
 */
void
unicode(input_t &input, char *utf8, int size) {
  if (size <= 0) {
    return;
  }

  // We can do no worse than one UTF-16 character per byte of UTF-8.
  // Use a heap buffer here; the previous variable-length array was a
  // non-standard compiler extension and risked stack overflow for large text.
  std::vector<WCHAR> wide(size);

  int chars = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, utf8, size, wide.data(), size);
  if (chars <= 0) {
    // Invalid UTF-8 or empty conversion; nothing to send
    return;
  }

  // Send all key down events
  for (int i = 0; i < chars; i++) {
    INPUT key {};
    key.type = INPUT_KEYBOARD;
    key.ki.wScan = wide[i];
    key.ki.dwFlags = KEYEVENTF_UNICODE;
    send_input(key);
  }

  // Send all key up events
  for (int i = 0; i < chars; i++) {
    INPUT key {};
    key.type = INPUT_KEYBOARD;
    key.ki.wScan = wide[i];
    key.ki.dwFlags = KEYEVENTF_UNICODE | KEYEVENTF_KEYUP;
    send_input(key);
  }
}
/**
 * @brief Allocates a new virtual gamepad.
 * @param input The global input context.
 * @param id The gamepad index mapping.
 * @param metadata Client-reported controller type and capability flags.
 * @param feedback_queue Queue used to deliver feedback events for this pad.
 * @return The result of the ViGEm allocation, or 0 if ViGEm is unavailable.
 */
int
alloc_gamepad(input_t &input, const gamepad_id_t &id, const gamepad_arrival_t &metadata, feedback_queue_t feedback_queue) {
  auto raw = (input_raw_t *) input.get();

  // Without ViGEm there is nothing to allocate; treat as a no-op success
  if (!raw->vigem) {
    return 0;
  }

  // Select the emulated controller type. Precedence: explicit configuration
  // first, then the client-reported controller type, then capability-based
  // heuristics, and finally the Xbox 360 default.
  VIGEM_TARGET_TYPE selectedGamepadType;

  if (config::input.gamepad == "x360"sv) {
    BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be Xbox 360 controller (manual selection)"sv;
    selectedGamepadType = Xbox360Wired;
  }
  else if (config::input.gamepad == "ds4"sv) {
    BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be DualShock 4 controller (manual selection)"sv;
    selectedGamepadType = DualShock4Wired;
  }
  else if (metadata.type == LI_CTYPE_PS) {
    BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be DualShock 4 controller (auto-selected by client-reported type)"sv;
    selectedGamepadType = DualShock4Wired;
  }
  else if (metadata.type == LI_CTYPE_XBOX) {
    BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be Xbox 360 controller (auto-selected by client-reported type)"sv;
    selectedGamepadType = Xbox360Wired;
  }
  else if (config::input.motion_as_ds4 && (metadata.capabilities & (LI_CCAP_ACCEL | LI_CCAP_GYRO))) {
    BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be DualShock 4 controller (auto-selected by motion sensor presence)"sv;
    selectedGamepadType = DualShock4Wired;
  }
  else if (config::input.touchpad_as_ds4 && (metadata.capabilities & LI_CCAP_TOUCHPAD)) {
    BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be DualShock 4 controller (auto-selected by touchpad presence)"sv;
    selectedGamepadType = DualShock4Wired;
  }
  else {
    BOOST_LOG(info) << "Gamepad " << id.globalIndex << " will be Xbox 360 controller (default)"sv;
    selectedGamepadType = Xbox360Wired;
  }

  // Warn about client features that the chosen emulated controller cannot expose
  if (selectedGamepadType == Xbox360Wired) {
    if (metadata.capabilities & (LI_CCAP_ACCEL | LI_CCAP_GYRO)) {
      BOOST_LOG(warning) << "Gamepad " << id.globalIndex << " has motion sensors, but they are not usable when emulating an Xbox 360 controller"sv;
    }
    if (metadata.capabilities & LI_CCAP_TOUCHPAD) {
      BOOST_LOG(warning) << "Gamepad " << id.globalIndex << " has a touchpad, but it is not usable when emulating an Xbox 360 controller"sv;
    }
    if (metadata.capabilities & LI_CCAP_RGB_LED) {
      BOOST_LOG(warning) << "Gamepad " << id.globalIndex << " has an RGB LED, but it is not usable when emulating an Xbox 360 controller"sv;
    }
  }
  else if (selectedGamepadType == DualShock4Wired) {
    if (!(metadata.capabilities & (LI_CCAP_ACCEL | LI_CCAP_GYRO))) {
      BOOST_LOG(warning) << "Gamepad " << id.globalIndex << " is emulating a DualShock 4 controller, but the client gamepad doesn't have motion sensors active"sv;
    }
    if (!(metadata.capabilities & LI_CCAP_TOUCHPAD)) {
      BOOST_LOG(warning) << "Gamepad " << id.globalIndex << " is emulating a DualShock 4 controller, but the client gamepad doesn't have a touchpad"sv;
    }
  }

  return raw->vigem->alloc_gamepad_internal(id, feedback_queue, selectedGamepadType);
}
/**
 * @brief Releases a previously allocated virtual gamepad.
 * @param input The global input context.
 * @param nr The global gamepad index to free.
 */
void
free_gamepad(input_t &input, int nr) {
  auto raw = (input_raw_t *) input.get();

  // Nothing to release when ViGEm support was never available
  if (raw->vigem) {
    raw->vigem->free_target(nr);
  }
}
/**
 * @brief Converts the standard button flags into X360 format.
 * @param gamepad_state The gamepad button/axis state sent from the client.
 * @return XUSB_BUTTON flags.
 */
static XUSB_BUTTON
x360_buttons(const gamepad_state_t &gamepad_state) {
  struct mapping_t {
    unsigned int flag;
    int button;
  };

  // Client button flag -> XUSB button bit
  static const mapping_t mappings[] = {
    { DPAD_UP, XUSB_GAMEPAD_DPAD_UP },
    { DPAD_DOWN, XUSB_GAMEPAD_DPAD_DOWN },
    { DPAD_LEFT, XUSB_GAMEPAD_DPAD_LEFT },
    { DPAD_RIGHT, XUSB_GAMEPAD_DPAD_RIGHT },
    { START, XUSB_GAMEPAD_START },
    { BACK, XUSB_GAMEPAD_BACK },
    { LEFT_STICK, XUSB_GAMEPAD_LEFT_THUMB },
    { RIGHT_STICK, XUSB_GAMEPAD_RIGHT_THUMB },
    { LEFT_BUTTON, XUSB_GAMEPAD_LEFT_SHOULDER },
    { RIGHT_BUTTON, XUSB_GAMEPAD_RIGHT_SHOULDER },
    { HOME | MISC_BUTTON, XUSB_GAMEPAD_GUIDE },  // either flag maps to Guide
    { A, XUSB_GAMEPAD_A },
    { B, XUSB_GAMEPAD_B },
    { X, XUSB_GAMEPAD_X },
    { Y, XUSB_GAMEPAD_Y },
  };

  int buttons {};
  for (const auto &m : mappings) {
    if (gamepad_state.buttonFlags & m.flag) {
      buttons |= m.button;
    }
  }

  return (XUSB_BUTTON) buttons;
}
/**
 * @brief Fills the X360 input report from the client's gamepad state.
 * @param gamepad The gamepad context whose report is updated.
 * @param gamepad_state The gamepad button/axis state sent from the client.
 */
static void
x360_update_state(gamepad_context_t &gamepad, const gamepad_state_t &gamepad_state) {
  auto &x360 = gamepad.report.x360;

  // Digital buttons
  x360.wButtons = x360_buttons(gamepad_state);

  // Analog triggers
  x360.bLeftTrigger = gamepad_state.lt;
  x360.bRightTrigger = gamepad_state.rt;

  // Thumbstick axes
  x360.sThumbLX = gamepad_state.lsX;
  x360.sThumbLY = gamepad_state.lsY;
  x360.sThumbRX = gamepad_state.rsX;
  x360.sThumbRY = gamepad_state.rsY;
}
/**
 * @brief Converts the D-pad button flags into a DS4 hat-switch direction.
 * @param gamepad_state The gamepad button/axis state sent from the client.
 * @return The DS4 D-pad direction value.
 */
static DS4_DPAD_DIRECTIONS
ds4_dpad(const gamepad_state_t &gamepad_state) {
  auto flags = gamepad_state.buttonFlags;

  // Collapse the four D-pad bits into two signed axes. Up takes precedence
  // over down and right over left, matching the original branch ordering.
  int ns = (flags & DPAD_UP) ? 1 : ((flags & DPAD_DOWN) ? -1 : 0);
  int ew = (flags & DPAD_RIGHT) ? 1 : ((flags & DPAD_LEFT) ? -1 : 0);

  if (ns > 0) {
    if (ew > 0) {
      return DS4_BUTTON_DPAD_NORTHEAST;
    }
    if (ew < 0) {
      return DS4_BUTTON_DPAD_NORTHWEST;
    }
    return DS4_BUTTON_DPAD_NORTH;
  }

  if (ns < 0) {
    if (ew > 0) {
      return DS4_BUTTON_DPAD_SOUTHEAST;
    }
    if (ew < 0) {
      return DS4_BUTTON_DPAD_SOUTHWEST;
    }
    return DS4_BUTTON_DPAD_SOUTH;
  }

  if (ew > 0) {
    return DS4_BUTTON_DPAD_EAST;
  }
  if (ew < 0) {
    return DS4_BUTTON_DPAD_WEST;
  }

  return DS4_BUTTON_DPAD_NONE;
}
/**
 * @brief Converts the standard button flags into DS4 format.
 * @param gamepad_state The gamepad button/axis state sent from the client.
 * @return DS4_BUTTONS flags.
 */
static DS4_BUTTONS
ds4_buttons(const gamepad_state_t &gamepad_state) {
  struct mapping_t {
    unsigned int flag;
    int button;
  };

  // Client button flag -> DS4 button bit
  static const mapping_t mappings[] = {
    { LEFT_STICK, DS4_BUTTON_THUMB_LEFT },
    { RIGHT_STICK, DS4_BUTTON_THUMB_RIGHT },
    { LEFT_BUTTON, DS4_BUTTON_SHOULDER_LEFT },
    { RIGHT_BUTTON, DS4_BUTTON_SHOULDER_RIGHT },
    { START, DS4_BUTTON_OPTIONS },
    { BACK, DS4_BUTTON_SHARE },
    { A, DS4_BUTTON_CROSS },
    { B, DS4_BUTTON_CIRCLE },
    { X, DS4_BUTTON_SQUARE },
    { Y, DS4_BUTTON_TRIANGLE },
  };

  int buttons {};
  for (const auto &m : mappings) {
    if (gamepad_state.buttonFlags & m.flag) {
      buttons |= m.button;
    }
  }

  // Any non-zero analog trigger value also sets the digital trigger bit
  if (gamepad_state.lt > 0) {
    buttons |= DS4_BUTTON_TRIGGER_LEFT;
  }
  if (gamepad_state.rt > 0) {
    buttons |= DS4_BUTTON_TRIGGER_RIGHT;
  }

  return (DS4_BUTTONS) buttons;
}
/**
 * @brief Converts the special button flags (PS / touchpad click) into DS4 format.
 * @param gamepad_state The gamepad button/axis state sent from the client.
 * @return DS4_SPECIAL_BUTTONS flags.
 */
static DS4_SPECIAL_BUTTONS
ds4_special_buttons(const gamepad_state_t &gamepad_state) {
  auto flags = gamepad_state.buttonFlags;
  int buttons {};

  if (flags & HOME) {
    buttons |= DS4_SPECIAL_BUTTON_PS;
  }

  // Allow either PS4/PS5 clickpad button or Xbox Series X share button to activate DS4 clickpad
  bool touchpad_click = (flags & (TOUCHPAD_BUTTON | MISC_BUTTON)) != 0;

  // Manual DS4 emulation: optionally let the BACK button trigger the DS4 touchpad click too
  if (config::input.gamepad == "ds4"sv && config::input.ds4_back_as_touchpad_click && (flags & BACK)) {
    touchpad_click = true;
  }

  if (touchpad_click) {
    buttons |= DS4_SPECIAL_BUTTON_TOUCHPAD;
  }

  return (DS4_SPECIAL_BUTTONS) buttons;
}
/**
 * @brief Converts a signed 16-bit axis value to the DS4's unsigned 8-bit X-axis range.
 * @param v The signed axis value.
 * @return The axis scaled into [0, 255].
 */
static std::uint8_t
to_ds4_triggerX(std::int16_t v) {
  // Shift [-32768, 32767] into [0, 65535], then scale down by 257 (65535/255)
  int shifted = (int) v + 32768;
  return (std::uint8_t) (shifted / 257);
}
/**
 * @brief Converts a signed 16-bit axis value to the DS4's unsigned 8-bit Y-axis range.
 * @param v The signed axis value.
 * @return The inverted axis scaled into a byte; an exact zero maps to 0xFF.
 */
static std::uint8_t
to_ds4_triggerY(std::int16_t v) {
  // Negate the offset axis (DS4 Y axis direction is inverted), scaling by 257
  int inverted = -(32766 + (int) v) / 257;
  return inverted == 0 ? 0xFF : (std::uint8_t) inverted;
}
/**
 * @brief Updates the DS4 input report with the provided gamepad state.
 * @param gamepad The gamepad to update.
 * @param gamepad_state The gamepad button/axis state sent from the client.
 */
static void
ds4_update_state(gamepad_context_t &gamepad, const gamepad_state_t &gamepad_state) {
  auto &ds4 = gamepad.report.ds4.Report;

  // Buttons and D-pad share the same 16-bit field
  ds4.wButtons = static_cast<uint16_t>(ds4_buttons(gamepad_state)) | static_cast<uint16_t>(ds4_dpad(gamepad_state));
  ds4.bSpecial = ds4_special_buttons(gamepad_state);

  // Analog triggers
  ds4.bTriggerL = gamepad_state.lt;
  ds4.bTriggerR = gamepad_state.rt;

  // Thumbsticks, rescaled from signed 16-bit to the DS4's unsigned 8-bit range
  ds4.bThumbLX = to_ds4_triggerX(gamepad_state.lsX);
  ds4.bThumbLY = to_ds4_triggerY(gamepad_state.lsY);
  ds4.bThumbRX = to_ds4_triggerX(gamepad_state.rsX);
  ds4.bThumbRY = to_ds4_triggerY(gamepad_state.rsY);
}
/**
 * @brief Sends DS4 input with updated timestamps and repeats to keep timestamp updated.
 * @details Some applications require updated timestamps values to register DS4 input.
 * @param vigem The global ViGEm context object.
 * @param nr The global gamepad index.
 */
void
ds4_update_ts_and_send(vigem_t *vigem, int nr) {
  auto &gamepad = vigem->gamepads[nr];

  // Cancel any pending updates. We will requeue one here when we're finished.
  if (gamepad.repeat_task) {
    task_pool.cancel(gamepad.repeat_task);
    gamepad.repeat_task = 0;
  }

  // Only send if the virtual pad still exists and is attached
  if (gamepad.gp && vigem_target_is_attached(gamepad.gp.get())) {
    auto now = std::chrono::steady_clock::now();
    auto delta_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(now - gamepad.last_report_ts);

    // Timestamp is reported in 5.333us units
    gamepad.report.ds4.Report.wTimestamp += (uint16_t) (delta_ns.count() / 5333);

    // Send the report to the virtual device
    auto status = vigem_target_ds4_update_ex(vigem->client.get(), gamepad.gp.get(), gamepad.report.ds4);
    if (!VIGEM_SUCCESS(status)) {
      // On failure, bail without requeueing; last_report_ts is left untouched
      BOOST_LOG(warning) << "Couldn't send gamepad input to ViGEm ["sv << util::hex(status).to_string_view() << ']';
      return;
    }

    // Repeat at least every 100ms to keep the 16-bit timestamp field from overflowing
    gamepad.last_report_ts = now;
    gamepad.repeat_task = task_pool.pushDelayed(ds4_update_ts_and_send, 100ms, vigem, nr).task_id;
  }
}
/**
 * @brief Updates virtual gamepad with the provided gamepad state.
 * @param input The input context.
 * @param nr The gamepad index to update.
 * @param gamepad_state The gamepad button/axis state sent from the client.
 */
void
gamepad_update(input_t &input, int nr, const gamepad_state_t &gamepad_state) {
  auto vigem = ((input_raw_t *) input.get())->vigem;

  // Gamepad emulation requires ViGEm and an allocated virtual pad
  if (!vigem) {
    return;
  }
  auto &gamepad = vigem->gamepads[nr];
  if (!gamepad.gp) {
    return;
  }

  if (vigem_target_get_type(gamepad.gp.get()) != Xbox360Wired) {
    // DS4 reports carry a timestamp, so they go through the dedicated send path
    ds4_update_state(gamepad, gamepad_state);
    ds4_update_ts_and_send(vigem, nr);
    return;
  }

  x360_update_state(gamepad, gamepad_state);
  auto status = vigem_target_x360_update(vigem->client.get(), gamepad.gp.get(), gamepad.report.x360);
  if (!VIGEM_SUCCESS(status)) {
    BOOST_LOG(warning) << "Couldn't send gamepad input to ViGEm ["sv << util::hex(status).to_string_view() << ']';
  }
}
/**
 * @brief Sends a gamepad touch event to the OS.
 * @param input The global input context.
 * @param touch The touch event.
 */
void
gamepad_touch(input_t &input, const gamepad_touch_t &touch) {
  auto vigem = ((input_raw_t *) input.get())->vigem;
  // If there is no gamepad support
  if (!vigem) {
    return;
  }

  auto &gamepad = vigem->gamepads[touch.id.globalIndex];
  if (!gamepad.gp) {
    return;
  }

  // Touch is only supported on DualShock 4 controllers
  if (vigem_target_get_type(gamepad.gp.get()) != DualShock4Wired) {
    return;
  }

  auto &report = gamepad.report.ds4.Report;

  // Only assigned on paths that later read it; the CANCEL_ALL path never uses it
  uint8_t pointerIndex;
  if (touch.eventType == LI_TOUCH_EVENT_DOWN) {
    // The DS4 touchpad tracks at most two contacts; available_pointers is a
    // bitmask of free slots (bit 0 = contact 1, bit 1 = contact 2)
    if (gamepad.available_pointers & 0x1) {
      // Reserve pointer index 0 for this touch
      gamepad.pointer_id_map[touch.pointerId] = pointerIndex = 0;
      gamepad.available_pointers &= ~(1 << pointerIndex);

      // Set pointer 0 down
      // Bit 7 is the "touch up" flag; the remaining bits hold the tracking
      // number, incremented for each new contact
      report.sCurrentTouch.bIsUpTrackingNum1 &= ~0x80;
      report.sCurrentTouch.bIsUpTrackingNum1++;
    }
    else if (gamepad.available_pointers & 0x2) {
      // Reserve pointer index 1 for this touch
      gamepad.pointer_id_map[touch.pointerId] = pointerIndex = 1;
      gamepad.available_pointers &= ~(1 << pointerIndex);

      // Set pointer 1 down
      report.sCurrentTouch.bIsUpTrackingNum2 &= ~0x80;
      report.sCurrentTouch.bIsUpTrackingNum2++;
    }
    else {
      BOOST_LOG(warning) << "No more free pointer indices! Did the client miss an touch up event?"sv;
      return;
    }
  }
  else if (touch.eventType == LI_TOUCH_EVENT_CANCEL_ALL) {
    // Raise both pointers
    report.sCurrentTouch.bIsUpTrackingNum1 |= 0x80;
    report.sCurrentTouch.bIsUpTrackingNum2 |= 0x80;

    // Remove all pointer index mappings
    gamepad.pointer_id_map.clear();

    // All pointers are now available
    gamepad.available_pointers = 0x3;
  }
  else {
    // UP/CANCEL/MOVE refer to an existing contact by client pointer ID
    auto i = gamepad.pointer_id_map.find(touch.pointerId);
    if (i == gamepad.pointer_id_map.end()) {
      BOOST_LOG(warning) << "Pointer ID not found! Did the client miss a touch down event?"sv;
      return;
    }

    pointerIndex = (*i).second;
    if (touch.eventType == LI_TOUCH_EVENT_UP || touch.eventType == LI_TOUCH_EVENT_CANCEL) {
      // Remove the pointer index mapping
      gamepad.pointer_id_map.erase(i);

      // Set pointer up
      if (pointerIndex == 0) {
        report.sCurrentTouch.bIsUpTrackingNum1 |= 0x80;
      }
      else {
        report.sCurrentTouch.bIsUpTrackingNum2 |= 0x80;
      }

      // Free the pointer index
      gamepad.available_pointers |= (1 << pointerIndex);
    }
    else if (touch.eventType != LI_TOUCH_EVENT_MOVE) {
      BOOST_LOG(warning) << "Unsupported touch event for gamepad: "sv << (uint32_t) touch.eventType;
      return;
    }
  }

  // Touchpad is 1920x943 according to ViGEm
  uint16_t x = touch.x * 1920;
  uint16_t y = touch.y * 943;
  // Pack 12-bit X and Y coordinates into the DS4's 3-byte touch sample format
  uint8_t touchData[] = {
    (uint8_t) (x & 0xFF),  // Low 8 bits of X
    (uint8_t) (((x >> 8) & 0x0F) | ((y & 0x0F) << 4)),  // High 4 bits of X and low 4 bits of Y
    (uint8_t) (((y >> 4) & 0xFF))  // High 8 bits of Y
  };

  report.sCurrentTouch.bPacketCounter++;
  if (touch.eventType != LI_TOUCH_EVENT_CANCEL_ALL) {
    if (pointerIndex == 0) {
      memcpy(report.sCurrentTouch.bTouchData1, touchData, sizeof(touchData));
    }
    else {
      memcpy(report.sCurrentTouch.bTouchData2, touchData, sizeof(touchData));
    }
  }

  ds4_update_ts_and_send(vigem, touch.id.globalIndex);
}
/**
 * @brief Sends a gamepad motion event to the OS.
 * @param input The global input context.
 * @param motion The motion event.
 */
void
gamepad_motion(input_t &input, const gamepad_motion_t &motion) {
  auto vigem = ((input_raw_t *) input.get())->vigem;

  // Motion requires gamepad support to be available
  if (!vigem) {
    return;
  }

  auto &gamepad = vigem->gamepads[motion.id.globalIndex];
  if (!gamepad.gp) {
    return;
  }

  // Motion is only supported on DualShock 4 controllers
  if (vigem_target_get_type(gamepad.gp.get()) != DualShock4Wired) {
    return;
  }

  ds4_update_motion(gamepad, motion.motionType, motion.x, motion.y, motion.z);
  ds4_update_ts_and_send(vigem, motion.id.globalIndex);
}
/**
 * @brief Sends a gamepad battery event to the OS.
 * @param input The global input context.
 * @param battery The battery event.
 */
void
gamepad_battery(input_t &input, const gamepad_battery_t &battery) {
  auto vigem = ((input_raw_t *) input.get())->vigem;
  // If there is no gamepad support
  if (!vigem) {
    return;
  }

  auto &gamepad = vigem->gamepads[battery.id.globalIndex];
  if (!gamepad.gp) {
    return;
  }

  // Battery is only supported on DualShock 4 controllers
  if (vigem_target_get_type(gamepad.gp.get()) != DualShock4Wired) {
    return;
  }

  // For details on the report format of these battery level fields, see:
  // https://github.com/torvalds/linux/blob/946c6b59c56dc6e7d8364a8959cb36bf6d10bc37/drivers/hid/hid-playstation.c#L2305-L2314

  auto &report = gamepad.report.ds4.Report;

  // Update the battery state if it is known.
  // bBatteryLvlSpecial layout (per the kernel driver above): bit 4 = cable
  // connected, low nibble = battery level 0x0-0xA or a special status above 0xA.
  switch (battery.state) {
    case LI_BATTERY_STATE_CHARGING:
    case LI_BATTERY_STATE_DISCHARGING:
      if (battery.state == LI_BATTERY_STATE_CHARGING) {
        report.bBatteryLvlSpecial |= 0x10;  // Connected via USB
      }
      else {
        report.bBatteryLvlSpecial &= ~0x10;  // Not connected via USB
      }

      // If there was a special battery status set before, clear that and
      // initialize the battery level to 50%. It will be overwritten below
      // if the actual percentage is known.
      if ((report.bBatteryLvlSpecial & 0xF) > 0xA) {
        report.bBatteryLvlSpecial = (report.bBatteryLvlSpecial & ~0xF) | 0x5;
      }
      break;

    case LI_BATTERY_STATE_FULL:
      report.bBatteryLvlSpecial = 0x1B;  // USB + Battery Full
      report.bBatteryLvl = 0xFF;
      break;

    case LI_BATTERY_STATE_NOT_PRESENT:
    case LI_BATTERY_STATE_NOT_CHARGING:
      report.bBatteryLvlSpecial = 0x1F;  // USB + Charging Error
      break;

    default:
      break;
  }

  // Update the battery level if it is known
  if (battery.percentage != LI_BATTERY_PERCENTAGE_UNKNOWN) {
    report.bBatteryLvl = battery.percentage * 255 / 100;

    // Don't overwrite low nibble if there's a special status there (see above)
    if ((report.bBatteryLvlSpecial & 0x10) && (report.bBatteryLvlSpecial & 0xF) <= 0xA) {
      report.bBatteryLvlSpecial = (report.bBatteryLvlSpecial & ~0xF) | ((battery.percentage + 5) / 10);
    }
  }

  ds4_update_ts_and_send(vigem, battery.id.globalIndex);
}
void
freeInput(void *p) {
auto input = (input_raw_t *) p;
delete input;
}
/**
 * @brief Returns the gamepad emulation options supported by this platform.
 * @param input The input context, or nullptr to query static defaults.
 * @return Reference to the list of supported gamepad options.
 */
std::vector<supported_gamepad_t> &
supported_gamepads(input_t *input) {
  if (!input) {
    static std::vector gps {
      supported_gamepad_t { "auto", true, "" },
      supported_gamepad_t { "x360", false, "" },
      supported_gamepad_t { "ds4", false, "" },
    };

    return gps;
  }

  auto vigem = ((input_raw_t *) input)->vigem;
  auto enabled = vigem != nullptr;
  auto reason = enabled ? "" : "gamepads.vigem-not-available";

  // ds4 == ps4
  // Rebuild the entries on every call: a function-local static is initialized
  // only once, which previously froze the enabled/reason values captured on
  // the first invocation even if ViGEm availability differed on later calls.
  static std::vector<supported_gamepad_t> gps;
  gps = {
    supported_gamepad_t { "auto", true, reason },
    supported_gamepad_t { "x360", enabled, reason },
    supported_gamepad_t { "ds4", enabled, reason }
  };

  for (auto &[name, is_enabled, reason_disabled] : gps) {
    if (!is_enabled) {
      BOOST_LOG(warning) << "Gamepad " << name << " is disabled due to " << reason_disabled;
    }
  }

  return gps;
}
/**
 * @brief Returns the supported platform capabilities to advertise to the client.
 * @return Capability flags.
 */
platform_caps::caps_t
get_capabilities() {
  platform_caps::caps_t caps = 0;

  // Controller touchpad passthrough only works when not emulating an X360 pad
  if (config::input.gamepad != "x360"sv) {
    caps |= platform_caps::controller_touch;
  }

  // Pen/touch injection relies on CreateSyntheticPointerDevice (Win10 1809+)
  auto user32 = GetModuleHandleA("user32.dll");
  if (GetProcAddress(user32, "CreateSyntheticPointerDevice") == nullptr) {
    BOOST_LOG(warning) << "Touch input requires Windows 10 1809 or later"sv;
  }
  else if (config::input.native_pen_touch) {
    caps |= platform_caps::pen_touch;
  }

  return caps;
}
} // namespace platf
| 62,084
|
C++
|
.cpp
| 1,519
| 34.685319
| 166
| 0.655472
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,016
|
display_ram.cpp
|
LizardByte_Sunshine/src/platform/windows/display_ram.cpp
|
/**
* @file src/platform/windows/display_ram.cpp
* @brief Definitions for handling ram.
*/
#include "display.h"
#include "misc.h"
#include "src/logging.h"
namespace platf {
using namespace std::literals;
}
namespace platf::dxgi {
// RAM-backed capture image. The pixel buffer is heap-allocated with new[]
// by display_ram_t::complete_img() and owned (and released) by this object.
struct img_t: public ::platf::img_t {
  ~img_t() override {
    delete[] data;
    data = nullptr;
  }
};
/**
 * @brief Blends a monochrome (AND/XOR mask) cursor into a captured image.
 * @param cursor The captured cursor shape and position.
 * @param img The image to blend the cursor into.
 */
void
blend_cursor_monochrome(const cursor_t &cursor, img_t &img) {
  // The shape buffer stacks the AND mask on top of the XOR mask, so each
  // half is Height/2 rows tall
  int height = cursor.shape_info.Height / 2;
  int width = cursor.shape_info.Width;
  int pitch = cursor.shape_info.Pitch;

  // img cursor.{x,y} < 0, skip parts of the cursor.img_data
  auto cursor_skip_y = -std::min(0, cursor.y);
  auto cursor_skip_x = -std::min(0, cursor.x);

  // img cursor.{x,y} > img.{x,y}, truncate parts of the cursor.img_data
  auto cursor_truncate_y = std::max(0, cursor.y - img.height);
  auto cursor_truncate_x = std::max(0, cursor.x - img.width);

  auto cursor_width = width - cursor_skip_x - cursor_truncate_x;
  auto cursor_height = height - cursor_skip_y - cursor_truncate_y;

  // Defensive guard; with non-negative skip/truncate values this shouldn't trigger
  if (cursor_height > height || cursor_width > width) {
    return;
  }

  auto img_skip_y = std::max(0, cursor.y);
  auto img_skip_x = std::max(0, cursor.x);

  auto cursor_img_data = cursor.img_data.data() + cursor_skip_y * pitch;

  // Rows/columns of the cursor that actually land inside the image
  int delta_height = std::min(cursor_height - cursor_truncate_y, std::max(0, img.height - img_skip_y));
  int delta_width = std::min(cursor_width - cursor_truncate_x, std::max(0, img.width - img_skip_x));

  // Each mask byte packs several 1-bit pixels
  auto pixels_per_byte = width / pitch;
  auto bytes_per_row = delta_width / pixels_per_byte;

  auto img_data = (int *) img.data;

  for (int i = 0; i < delta_height; ++i) {
    auto and_mask = &cursor_img_data[i * pitch];
    auto xor_mask = &cursor_img_data[(i + height) * pitch];
    auto img_pixel_p = &img_data[(i + img_skip_y) * (img.row_pitch / img.pixel_pitch) + img_skip_x];

    auto skip_x = cursor_skip_x;
    for (int x = 0; x < bytes_per_row; ++x) {
      for (auto bit = 0u; bit < 8; ++bit) {
        if (skip_x > 0) {
          --skip_x;
          continue;
        }

        // Mask bits are MSB-first: AND clears the pixel, XOR inverts it
        int and_ = *and_mask & (1 << (7 - bit)) ? -1 : 0;
        int xor_ = *xor_mask & (1 << (7 - bit)) ? -1 : 0;

        *img_pixel_p &= and_;
        *img_pixel_p ^= xor_;

        ++img_pixel_p;
      }

      ++and_mask;
      ++xor_mask;
    }
  }
}
/**
 * @brief Alpha-blends one cursor pixel onto one image pixel (in place).
 * @param img_pixel_p Pointer to the destination 32-bit image pixel.
 * @param cursor_pixel The 32-bit cursor pixel; byte 3 is the alpha channel.
 */
void
apply_color_alpha(int *img_pixel_p, int cursor_pixel) {
  auto cursor_bytes = (std::uint8_t *) &cursor_pixel;
  auto img_bytes = (std::uint8_t *) img_pixel_p;

  // TODO: When use of IDXGIOutput5 is implemented, support different color formats
  auto alpha = cursor_bytes[3];
  if (alpha == 255) {
    // Fully opaque: replace the destination pixel outright
    *img_pixel_p = cursor_pixel;
    return;
  }

  // Blend each color channel, rounding the (255 - alpha) contribution
  for (int c = 0; c < 3; ++c) {
    img_bytes[c] = cursor_bytes[c] + (img_bytes[c] * (255 - alpha) + 255 / 2) / 255;
  }
}
/**
 * @brief Applies one masked-color cursor pixel onto one image pixel (in place).
 * @param img_pixel_p Pointer to the destination 32-bit image pixel.
 * @param cursor_pixel The 32-bit cursor pixel; byte 3 selects XOR vs replace.
 */
void
apply_color_masked(int *img_pixel_p, int cursor_pixel) {
  // TODO: When use of IDXGIOutput5 is implemented, support different color formats
  auto alpha = ((std::uint8_t *) &cursor_pixel)[3];

  // 0xFF selects XOR semantics; anything else replaces the pixel
  *img_pixel_p = (alpha == 0xFF) ? (*img_pixel_p ^ cursor_pixel) : cursor_pixel;
}
/**
 * @brief Blends a 32-bit color cursor into a captured image.
 * @param cursor The captured cursor shape and position.
 * @param img The image to blend the cursor into.
 * @param masked true for masked-color cursors (XOR semantics), false for alpha-blended cursors.
 */
void
blend_cursor_color(const cursor_t &cursor, img_t &img, const bool masked) {
  int height = cursor.shape_info.Height;
  int width = cursor.shape_info.Width;
  int pitch = cursor.shape_info.Pitch;

  // img cursor.y < 0, skip parts of the cursor.img_data
  auto cursor_skip_y = -std::min(0, cursor.y);
  auto cursor_skip_x = -std::min(0, cursor.x);

  // img cursor.{x,y} > img.{x,y}, truncate parts of the cursor.img_data
  auto cursor_truncate_y = std::max(0, cursor.y - img.height);
  auto cursor_truncate_x = std::max(0, cursor.x - img.width);

  auto img_skip_y = std::max(0, cursor.y);
  auto img_skip_x = std::max(0, cursor.x);

  auto cursor_width = width - cursor_skip_x - cursor_truncate_x;
  auto cursor_height = height - cursor_skip_y - cursor_truncate_y;

  // Defensive guard; with non-negative skip/truncate values this shouldn't trigger
  if (cursor_height > height || cursor_width > width) {
    return;
  }

  auto cursor_img_data = (int *) &cursor.img_data[cursor_skip_y * pitch];

  // Rows/columns of the cursor that actually land inside the image
  int delta_height = std::min(cursor_height - cursor_truncate_y, std::max(0, img.height - img_skip_y));
  int delta_width = std::min(cursor_width - cursor_truncate_x, std::max(0, img.width - img_skip_x));

  auto img_data = (int *) img.data;

  for (int i = 0; i < delta_height; ++i) {
    auto cursor_begin = &cursor_img_data[i * cursor.shape_info.Width + cursor_skip_x];
    auto cursor_end = &cursor_begin[delta_width];

    auto img_pixel_p = &img_data[(i + img_skip_y) * (img.row_pitch / img.pixel_pitch) + img_skip_x];
    std::for_each(cursor_begin, cursor_end, [&](int cursor_pixel) {
      if (masked) {
        apply_color_masked(img_pixel_p, cursor_pixel);
      }
      else {
        apply_color_alpha(img_pixel_p, cursor_pixel);
      }
      ++img_pixel_p;
    });
  }
}
/**
 * @brief Blends the cursor into the image, dispatching on the cursor shape type.
 * @param cursor The captured cursor shape and position.
 * @param img The image to blend the cursor into.
 */
void
blend_cursor(const cursor_t &cursor, img_t &img) {
  auto type = cursor.shape_info.Type;

  if (type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME) {
    blend_cursor_monochrome(cursor, img);
  }
  else if (type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR || type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR) {
    // Masked-color cursors use XOR semantics, plain color cursors alpha-blend
    blend_cursor_color(cursor, img, type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR);
  }
  else {
    BOOST_LOG(warning) << "Unsupported cursor format ["sv << type << ']';
  }
}
/**
 * @brief Captures the next desktop frame into a CPU-side image.
 * @param pull_free_image_cb Callback that provides a free image to populate.
 * @param img_out Receives the populated image on success.
 * @param timeout Maximum time to wait for a new frame.
 * @param cursor_visible Whether the cursor should be blended into the image.
 * @return capture_e::ok on success, or a status directing the caller to retry/reinit.
 */
capture_e
display_ddup_ram_t::snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) {
  HRESULT status;

  DXGI_OUTDUPL_FRAME_INFO frame_info;

  resource_t::pointer res_p {};
  auto capture_status = dup.next_frame(frame_info, timeout, &res_p);
  resource_t res { res_p };

  if (capture_status != capture_e::ok) {
    return capture_status;
  }

  // A frame can carry a desktop update, a cursor update, or both
  const bool mouse_update_flag = frame_info.LastMouseUpdateTime.QuadPart != 0 || frame_info.PointerShapeBufferSize > 0;
  const bool frame_update_flag = frame_info.AccumulatedFrames != 0 || frame_info.LastPresentTime.QuadPart != 0;
  const bool update_flag = mouse_update_flag || frame_update_flag;

  if (!update_flag) {
    return capture_e::timeout;
  }

  std::optional<std::chrono::steady_clock::time_point> frame_timestamp;
  if (auto qpc_displayed = std::max(frame_info.LastPresentTime.QuadPart, frame_info.LastMouseUpdateTime.QuadPart)) {
    // Translate QueryPerformanceCounter() value to steady_clock time point
    frame_timestamp = std::chrono::steady_clock::now() - qpc_time_difference(qpc_counter(), qpc_displayed);
  }

  // A non-zero shape buffer size means the cursor shape changed this frame
  if (frame_info.PointerShapeBufferSize > 0) {
    auto &img_data = cursor.img_data;

    img_data.resize(frame_info.PointerShapeBufferSize);

    UINT dummy;
    status = dup.dup->GetFramePointerShape(img_data.size(), img_data.data(), &dummy, &cursor.shape_info);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Failed to get new pointer shape [0x"sv << util::hex(status).to_string_view() << ']';
      return capture_e::error;
    }
  }

  // Cursor position/visibility updates arrive independently of shape changes
  if (frame_info.LastMouseUpdateTime.QuadPart) {
    cursor.x = frame_info.PointerPosition.Position.x;
    cursor.y = frame_info.PointerPosition.Position.y;
    cursor.visible = frame_info.PointerPosition.Visible;
  }

  if (frame_update_flag) {
    {
      texture2d_t src {};
      status = res->QueryInterface(IID_ID3D11Texture2D, (void **) &src);
      if (FAILED(status)) {
        BOOST_LOG(error) << "Couldn't query interface [0x"sv << util::hex(status).to_string_view() << ']';
        return capture_e::error;
      }

      D3D11_TEXTURE2D_DESC desc;
      src->GetDesc(&desc);

      // If we don't know the capture format yet, grab it from this texture and create the staging texture
      if (capture_format == DXGI_FORMAT_UNKNOWN) {
        capture_format = desc.Format;
        BOOST_LOG(info) << "Capture format ["sv << dxgi_format_to_string(capture_format) << ']';

        D3D11_TEXTURE2D_DESC t {};
        t.Width = width;
        t.Height = height;
        t.MipLevels = 1;
        t.ArraySize = 1;
        t.SampleDesc.Count = 1;
        t.Usage = D3D11_USAGE_STAGING;
        t.Format = capture_format;
        t.CPUAccessFlags = D3D11_CPU_ACCESS_READ;

        auto status = device->CreateTexture2D(&t, nullptr, &texture);
        if (FAILED(status)) {
          BOOST_LOG(error) << "Failed to create staging texture [0x"sv << util::hex(status).to_string_view() << ']';
          return capture_e::error;
        }
      }

      // It's possible for our display enumeration to race with mode changes and result in
      // mismatched image pool and desktop texture sizes. If this happens, just reinit again.
      if (desc.Width != width || desc.Height != height) {
        BOOST_LOG(info) << "Capture size changed ["sv << width << 'x' << height << " -> "sv << desc.Width << 'x' << desc.Height << ']';
        return capture_e::reinit;
      }

      // It's also possible for the capture format to change on the fly. If that happens,
      // reinitialize capture to try format detection again and create new images.
      if (capture_format != desc.Format) {
        BOOST_LOG(info) << "Capture format changed ["sv << dxgi_format_to_string(capture_format) << " -> "sv << dxgi_format_to_string(desc.Format) << ']';
        return capture_e::reinit;
      }

      // Copy from GPU to CPU
      device_ctx->CopyResource(texture.get(), src.get());
    }
  }

  if (!pull_free_image_cb(img_out)) {
    return capture_e::interrupted;
  }
  auto img = (img_t *) img_out.get();

  // If we don't know the final capture format yet, encode a dummy image
  if (capture_format == DXGI_FORMAT_UNKNOWN) {
    BOOST_LOG(debug) << "Capture format is still unknown. Encoding a blank image"sv;

    if (dummy_img(img)) {
      return capture_e::error;
    }
  }
  else {
    // Map the staging texture for CPU access (making it inaccessible for the GPU)
    status = device_ctx->Map(texture.get(), 0, D3D11_MAP_READ, 0, &img_info);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Failed to map texture [0x"sv << util::hex(status).to_string_view() << ']';

      return capture_e::error;
    }

    // Now that we know the capture format, we can finish creating the image
    if (complete_img(img, false)) {
      device_ctx->Unmap(texture.get(), 0);
      img_info.pData = nullptr;
      return capture_e::error;
    }

    std::copy_n((std::uint8_t *) img_info.pData, height * img_info.RowPitch, (std::uint8_t *) img->data);

    // Unmap the staging texture to allow GPU access again
    device_ctx->Unmap(texture.get(), 0);
    img_info.pData = nullptr;
  }

  // Blend the cursor in software after the copy
  if (cursor_visible && cursor.visible) {
    blend_cursor(cursor, *img);
  }

  if (img) {
    img->frame_timestamp = frame_timestamp;
  }

  return capture_e::ok;
}
/**
 * @brief Releases the desktop duplication frame acquired by snapshot().
 * @return The result of releasing the frame.
 */
capture_e
display_ddup_ram_t::release_snapshot() {
  return dup.release_frame();
}
/**
 * @brief Allocates an empty capture image.
 * @return A new image with only format-independent fields populated.
 */
std::shared_ptr<platf::img_t>
display_ram_t::alloc_img() {
  // The pixel buffer itself is created later by complete_img(), once the
  // capture format (and therefore the row pitch) is known.
  std::shared_ptr<img_t> img = std::make_shared<img_t>();
  img->height = height;
  img->width = width;
  return img;
}
/**
 * @brief Finishes creating an image once the capture format (and pitch) is known.
 * @param img The image to complete.
 * @param dummy true when creating a placeholder image before the format is known.
 * @return 0 on success, -1 on failure.
 */
int
display_ram_t::complete_img(platf::img_t *img, bool dummy) {
  // If this is not a dummy image, we must know the format by now
  if (!dummy && capture_format == DXGI_FORMAT_UNKNOWN) {
    BOOST_LOG(error) << "display_ram_t::complete_img() called with unknown capture format!";
    return -1;
  }

  img->pixel_pitch = get_pixel_pitch();

  if (dummy && !img->row_pitch) {
    // Assume our dummy image will have no padding
    img->row_pitch = img->pixel_pitch * img->width;
  }

  // Reallocate the image buffer if the pitch changes
  if (!dummy && img->row_pitch != img_info.RowPitch) {
    img->row_pitch = img_info.RowPitch;
    // The buffer is allocated with new[] below, so it must be released
    // with delete[] (plain delete on a new[] array is undefined behavior)
    delete[] img->data;
    img->data = nullptr;
  }

  if (!img->data) {
    img->data = new std::uint8_t[img->row_pitch * height];
  }

  return 0;
}
/**
 * @brief Produces an all-zero placeholder frame sized like a real capture.
 * @memberof platf::dxgi::display_ram_t
 * @param img The image to populate.
 * @return 0 on success, -1 on failure.
 */
int
display_ram_t::dummy_img(platf::img_t *img) {
  if (complete_img(img, true) != 0) {
    return -1;
  }

  auto pixels = (std::uint8_t *) img->data;
  std::fill_n(pixels, height * img->row_pitch, 0);
  return 0;
}
/**
 * @brief Lists the DXGI formats the RAM capture path can handle.
 * @return The supported capture formats.
 */
std::vector<DXGI_FORMAT>
display_ram_t::get_supported_capture_formats() {
  std::vector<DXGI_FORMAT> formats;
  formats.push_back(DXGI_FORMAT_B8G8R8A8_UNORM);
  formats.push_back(DXGI_FORMAT_B8G8R8X8_UNORM);
  return formats;
}
/**
 * @brief Initializes the display base and the desktop duplication session.
 * @param config The video configuration.
 * @param display_name The name of the display to capture.
 * @return 0 on success, -1 on failure.
 */
int
display_ddup_ram_t::init(const ::video::config_t &config, const std::string &display_name) {
  // Initialize the base display first; only then set up duplication
  if (display_base_t::init(config, display_name)) {
    return -1;
  }
  if (dup.init(this, config)) {
    return -1;
  }
  return 0;
}
/**
 * @brief Creates the encode device for CPU-backed captures.
 * @param pix_fmt The requested pixel format (unused on this path).
 * @return A default avcodec encode device.
 */
std::unique_ptr<avcodec_encode_device_t>
display_ram_t::make_avcodec_encode_device(pix_fmt_e pix_fmt) {
  // No GPU device is involved; a default-constructed encode device suffices
  auto device = std::make_unique<avcodec_encode_device_t>();
  return device;
}
} // namespace platf::dxgi
| 13,321
|
C++
|
.cpp
| 322
| 34.92236
| 176
| 0.618613
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,017
|
audio.cpp
|
LizardByte_Sunshine/src/platform/windows/audio.cpp
|
/**
* @file src/platform/windows/audio.cpp
* @brief Definitions for Windows audio capture.
*/
#define INITGUID
#include <audioclient.h>
#include <mmdeviceapi.h>
#include <roapi.h>
#include <synchapi.h>
#include <newdev.h>
#include <avrt.h>
#include "src/config.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "misc.h"
// Must be the last included file
// clang-format off
#include "PolicyConfig.h"
// clang-format on
DEFINE_PROPERTYKEY(PKEY_Device_DeviceDesc, 0xa45c254e, 0xdf1c, 0x4efd, 0x80, 0x20, 0x67, 0xd1, 0x46, 0xa8, 0x50, 0xe0, 2); // DEVPROP_TYPE_STRING
DEFINE_PROPERTYKEY(PKEY_Device_FriendlyName, 0xa45c254e, 0xdf1c, 0x4efd, 0x80, 0x20, 0x67, 0xd1, 0x46, 0xa8, 0x50, 0xe0, 14); // DEVPROP_TYPE_STRING
DEFINE_PROPERTYKEY(PKEY_DeviceInterface_FriendlyName, 0x026e516e, 0xb814, 0x414b, 0x83, 0xcd, 0x85, 0x6d, 0x6f, 0xef, 0x48, 0x22, 2);
#if defined(__x86_64) || defined(_M_AMD64)
#define STEAM_DRIVER_SUBDIR L"x64"
#elif defined(__i386) || defined(_M_IX86)
#define STEAM_DRIVER_SUBDIR L"x86"
#else
#warning No known Steam audio driver for this architecture
#endif
namespace {
constexpr auto SAMPLE_RATE = 48000;
constexpr auto STEAM_AUDIO_DRIVER_PATH = L"%CommonProgramFiles(x86)%\\Steam\\drivers\\Windows10\\" STEAM_DRIVER_SUBDIR L"\\SteamStreamingSpeakers.inf";
constexpr auto waveformat_mask_stereo = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;
constexpr auto waveformat_mask_surround51_with_backspeakers = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY |
SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT;
constexpr auto waveformat_mask_surround51_with_sidespeakers = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY |
SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT;
constexpr auto waveformat_mask_surround71 = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY |
SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT |
SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT;
// Sample encodings we may request for capture / virtual-sink playback.
// The mapping to WASAPI subformats and bit widths lives in create_waveformat().
enum class sample_format_e {
  f32,  ///< 32-bit IEEE float
  s32,  ///< 32-bit signed PCM
  s24in32,  ///< 24 valid bits in a 32-bit container
  s24,  ///< packed 24-bit signed PCM
  s16,  ///< 16-bit signed PCM
  _size,  ///< sentinel: number of formats, used by static_assert exhaustiveness checks
};
/**
 * @brief Build a WAVEFORMATEXTENSIBLE describing a 48 kHz stream.
 * @param sample_format Desired sample encoding (subformat + bit widths).
 * @param channel_count Number of interleaved channels.
 * @param channel_mask Speaker-position mask matching the channel count.
 * @return A fully populated WAVEFORMATEXTENSIBLE.
 */
constexpr WAVEFORMATEXTENSIBLE
create_waveformat(sample_format_e sample_format, WORD channel_count, DWORD channel_mask) {
  static_assert((int) sample_format_e::_size == 5, "Unrecognized sample_format_e");

  WAVEFORMATEXTENSIBLE waveformat = {};

  // Select the subformat tag plus container/valid bit widths.
  // Any unrecognized value is treated as f32, matching the switch default
  // this chain replaces.
  if (sample_format == sample_format_e::s32) {
    waveformat.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
    waveformat.Format.wBitsPerSample = 32;
    waveformat.Samples.wValidBitsPerSample = 32;
  }
  else if (sample_format == sample_format_e::s24in32) {
    waveformat.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
    waveformat.Format.wBitsPerSample = 32;
    waveformat.Samples.wValidBitsPerSample = 24;
  }
  else if (sample_format == sample_format_e::s24) {
    waveformat.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
    waveformat.Format.wBitsPerSample = 24;
    waveformat.Samples.wValidBitsPerSample = 24;
  }
  else if (sample_format == sample_format_e::s16) {
    waveformat.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
    waveformat.Format.wBitsPerSample = 16;
    waveformat.Samples.wValidBitsPerSample = 16;
  }
  else {
    // sample_format_e::f32 (and anything unknown)
    waveformat.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
    waveformat.Format.wBitsPerSample = 32;
    waveformat.Samples.wValidBitsPerSample = 32;
  }

  // Common fields derived from the rate, channel count and bit width.
  waveformat.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
  waveformat.Format.nChannels = channel_count;
  waveformat.Format.nSamplesPerSec = SAMPLE_RATE;
  waveformat.Format.nBlockAlign = waveformat.Format.nChannels * waveformat.Format.wBitsPerSample / 8;
  waveformat.Format.nAvgBytesPerSec = waveformat.Format.nSamplesPerSec * waveformat.Format.nBlockAlign;
  waveformat.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
  waveformat.dwChannelMask = channel_mask;

  return waveformat;
}
using virtual_sink_waveformats_t = std::vector<WAVEFORMATEXTENSIBLE>;
/**
 * @brief Enumerate the waveformats we are willing to force onto a virtual sink,
 *        in order of preference, for a given channel count.
 * @tparam channel_count Number of channels: 2 (stereo), 6 (5.1) or 8 (7.1).
 * @return List of candidate waveformats to try with IPolicyConfig::SetDeviceFormat().
 *
 * Fix: the original could fall off the end of a non-void function (UB) if
 * instantiated with an unsupported channel count; we now reject such
 * instantiations at compile time and make the branch chain exhaustive.
 */
template <WORD channel_count>
virtual_sink_waveformats_t
create_virtual_sink_waveformats() {
  static_assert(channel_count == 2 || channel_count == 6 || channel_count == 8,
                "create_virtual_sink_waveformats(): unsupported channel count");
  if constexpr (channel_count == 2) {
    auto channel_mask = waveformat_mask_stereo;
    // only choose 24 or 16-bit formats to avoid clobbering existing Dolby/DTS spatial audio settings
    return {
      create_waveformat(sample_format_e::s24in32, channel_count, channel_mask),
      create_waveformat(sample_format_e::s24, channel_count, channel_mask),
      create_waveformat(sample_format_e::s16, channel_count, channel_mask),
    };
  }
  else if constexpr (channel_count == 6) {
    // Try both common 5.1 speaker placements for every sample format.
    auto channel_mask1 = waveformat_mask_surround51_with_backspeakers;
    auto channel_mask2 = waveformat_mask_surround51_with_sidespeakers;
    return {
      create_waveformat(sample_format_e::f32, channel_count, channel_mask1),
      create_waveformat(sample_format_e::f32, channel_count, channel_mask2),
      create_waveformat(sample_format_e::s32, channel_count, channel_mask1),
      create_waveformat(sample_format_e::s32, channel_count, channel_mask2),
      create_waveformat(sample_format_e::s24in32, channel_count, channel_mask1),
      create_waveformat(sample_format_e::s24in32, channel_count, channel_mask2),
      create_waveformat(sample_format_e::s24, channel_count, channel_mask1),
      create_waveformat(sample_format_e::s24, channel_count, channel_mask2),
      create_waveformat(sample_format_e::s16, channel_count, channel_mask1),
      create_waveformat(sample_format_e::s16, channel_count, channel_mask2),
    };
  }
  else {
    // channel_count == 8 — guaranteed by the static_assert above.
    auto channel_mask = waveformat_mask_surround71;
    return {
      create_waveformat(sample_format_e::f32, channel_count, channel_mask),
      create_waveformat(sample_format_e::s32, channel_count, channel_mask),
      create_waveformat(sample_format_e::s24in32, channel_count, channel_mask),
      create_waveformat(sample_format_e::s24, channel_count, channel_mask),
      create_waveformat(sample_format_e::s16, channel_count, channel_mask),
    };
  }
}
/**
 * @brief Render a waveformat as a short human-readable string for logging,
 *        e.g. "S24 48000 5.1" or "F32 48000 2.0".
 * @param waveformat The format to describe.
 * @return Compact description: encoding + valid bits, sample rate, channel layout.
 */
std::string
waveformat_to_pretty_string(const WAVEFORMATEXTENSIBLE &waveformat) {
  // Encoding prefix: F = IEEE float, S = signed PCM.
  std::string pretty;
  if (waveformat.SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
    pretty = "F";
  }
  else if (waveformat.SubFormat == KSDATAFORMAT_SUBTYPE_PCM) {
    pretty = "S";
  }
  else {
    pretty = "UNKNOWN";
  }

  pretty += std::to_string(waveformat.Samples.wValidBitsPerSample) + " " +
            std::to_string(waveformat.Format.nSamplesPerSec) + " ";

  // Describe the speaker layout from the channel mask.
  const auto mask = waveformat.dwChannelMask;
  if (mask == waveformat_mask_stereo) {
    pretty += "2.0";
  }
  else if (mask == waveformat_mask_surround51_with_backspeakers) {
    pretty += "5.1";
  }
  else if (mask == waveformat_mask_surround51_with_sidespeakers) {
    pretty += "5.1 (sidespeakers)";
  }
  else if (mask == waveformat_mask_surround71) {
    pretty += "7.1";
  }
  else {
    pretty += std::to_string(waveformat.Format.nChannels) + " channels (unrecognized)";
  }

  return pretty;
}
} // namespace
using namespace std::literals;
namespace platf::audio {
// Deleter for util::safe_ptr: drops one COM reference on the wrapped interface.
template <class T>
void
Release(T *p) {
  p->Release();
}
// Deleter for util::safe_ptr: frees memory that a COM API handed out
// (allocated with CoTaskMemAlloc, e.g. GetMixFormat / GetId results).
template <class T>
void
co_task_free(T *p) {
  CoTaskMemFree((LPVOID) p);
}
// RAII aliases for the COM interfaces and COM-allocated buffers used below.
using device_enum_t = util::safe_ptr<IMMDeviceEnumerator, Release<IMMDeviceEnumerator>>;
using device_t = util::safe_ptr<IMMDevice, Release<IMMDevice>>;
using collection_t = util::safe_ptr<IMMDeviceCollection, Release<IMMDeviceCollection>>;
using audio_client_t = util::safe_ptr<IAudioClient, Release<IAudioClient>>;
using audio_capture_t = util::safe_ptr<IAudioCaptureClient, Release<IAudioCaptureClient>>;
// Buffers returned by COM (CoTaskMemAlloc-backed), freed with CoTaskMemFree.
using wave_format_t = util::safe_ptr<WAVEFORMATEX, co_task_free<WAVEFORMATEX>>;
using wstring_t = util::safe_ptr<WCHAR, co_task_free<WCHAR>>;
// Win32 kernel handle, closed with CloseHandle.
using handle_t = util::safe_ptr_v2<void, BOOL, CloseHandle>;
// Undocumented policy-config interface (see PolicyConfig.h) and property stores.
using policy_t = util::safe_ptr<IPolicyConfig, Release<IPolicyConfig>>;
using prop_t = util::safe_ptr<IPropertyStore, Release<IPropertyStore>>;
// RAII guard for per-thread COM initialization: CoInitializeEx on construction,
// CoUninitialize on destruction. Returned from platf::init() as a deinit_t so
// COM stays initialized for the process lifetime.
class co_init_t: public deinit_t {
public:
  co_init_t() {
    CoInitializeEx(nullptr, COINIT_MULTITHREADED | COINIT_SPEED_OVER_MEMORY);
  }
  ~co_init_t() override {
    CoUninitialize();
  }
};
// RAII wrapper for a PROPVARIANT: initialized on construction and cleared on
// destruction, so string values fetched from IPropertyStore are always freed.
class prop_var_t {
public:
  prop_var_t() {
    PropVariantInit(&prop);
  }
  ~prop_var_t() {
    PropVariantClear(&prop);
  }
  PROPVARIANT prop;
};
// Description of one supported channel layout and the waveformats tied to it.
struct format_t {
  // Number of channels (2, 6 or 8).
  WORD channel_count;
  // Human-readable layout name, also embedded in virtual sink names.
  std::string name;
  // Speaker mask requested for the loopback capture stream.
  int capture_waveformat_channel_mask;
  // Candidate formats to apply to a virtual sink, in order of preference.
  virtual_sink_waveformats_t virtual_sink_waveformats;
};
// The three channel layouts Sunshine can capture. Order matters: sink_info()
// indexes this array as formats[0..2] when composing virtual sink names.
const std::array<const format_t, 3> formats = {
  format_t {
    2,
    "Stereo",
    waveformat_mask_stereo,
    create_virtual_sink_waveformats<2>(),
  },
  format_t {
    6,
    "Surround 5.1",
    waveformat_mask_surround51_with_backspeakers,
    create_virtual_sink_waveformats<6>(),
  },
  format_t {
    8,
    "Surround 7.1",
    waveformat_mask_surround71,
    create_virtual_sink_waveformats<8>(),
  },
};
/**
 * @brief Create and initialize a shared-mode loopback IAudioClient on a device.
 * @param device The render device to capture from.
 * @param format Desired channel layout; capture samples are always f32.
 * @return The initialized audio client, or nullptr on failure.
 */
audio_client_t
make_audio_client(device_t &device, const format_t &format) {
  audio_client_t audio_client;
  auto status = device->Activate(
    IID_IAudioClient,
    CLSCTX_ALL,
    nullptr,
    (void **) &audio_client);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Couldn't activate Device: [0x"sv << util::hex(status).to_string_view() << ']';
    return nullptr;
  }

  // We always capture 32-bit float; AUTOCONVERTPCM below handles any mismatch.
  WAVEFORMATEXTENSIBLE capture_waveformat =
    create_waveformat(sample_format_e::f32, format.channel_count, format.capture_waveformat_channel_mask);

  {
    wave_format_t mixer_waveformat;
    status = audio_client->GetMixFormat(&mixer_waveformat);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Couldn't get mix format for audio device: [0x"sv << util::hex(status).to_string_view() << ']';
      return nullptr;
    }
    // Prefer the native channel layout of captured audio device when channel counts match.
    // cbSize >= 22 guarantees the dwChannelMask/Samples/SubFormat extension is present.
    if (mixer_waveformat->nChannels == format.channel_count &&
        mixer_waveformat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
        mixer_waveformat->cbSize >= 22) {
      auto waveformatext_pointer = reinterpret_cast<const WAVEFORMATEXTENSIBLE *>(mixer_waveformat.get());
      capture_waveformat.dwChannelMask = waveformatext_pointer->dwChannelMask;
    }
  }

  status = audio_client->Initialize(
    AUDCLNT_SHAREMODE_SHARED,
    AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
      AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY,  // Enable automatic resampling to 48 KHz
    0, 0,
    (LPWAVEFORMATEX) &capture_waveformat,
    nullptr);
  // Fix: use FAILED() like every other HRESULT check here; the bare `if (status)`
  // would have treated any non-zero success code as a failure.
  if (FAILED(status)) {
    BOOST_LOG(error) << "Couldn't initialize audio client for ["sv << format.name << "]: [0x"sv << util::hex(status).to_string_view() << ']';
    return nullptr;
  }

  BOOST_LOG(info) << "Audio capture format is " << logging::bracket(waveformat_to_pretty_string(capture_waveformat));
  return audio_client;
}
/**
 * @brief Look up the system default render endpoint for the console role.
 * @param device_enum An initialized device enumerator.
 * @return The default render device, or nullptr on failure.
 */
device_t
default_device(device_enum_t &device_enum) {
  device_t endpoint;
  const auto status = device_enum->GetDefaultAudioEndpoint(
    eRender,
    eConsole,
    &endpoint);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Couldn't get default audio endpoint [0x"sv << util::hex(status).to_string_view() << ']';
    return nullptr;
  }
  return endpoint;
}
/**
 * @brief Endpoint-change listener registered with IMMDeviceEnumerator.
 *
 * Only default-render-device changes are tracked; everything else is a no-op.
 * The object is stack/member allocated, so the IUnknown ref counting is stubbed out.
 */
class audio_notification_t: public ::IMMNotificationClient {
public:
  audio_notification_t() {}

  // IUnknown implementation (unused by IMMDeviceEnumerator) — no real refcounting.
  ULONG STDMETHODCALLTYPE
  AddRef() {
    return 1;
  }
  ULONG STDMETHODCALLTYPE
  Release() {
    return 1;
  }
  HRESULT STDMETHODCALLTYPE
  QueryInterface(REFIID riid, VOID **ppvInterface) {
    if (IID_IUnknown == riid) {
      AddRef();
      *ppvInterface = (IUnknown *) this;
      return S_OK;
    }
    else if (__uuidof(IMMNotificationClient) == riid) {
      AddRef();
      *ppvInterface = (IMMNotificationClient *) this;
      return S_OK;
    }
    else {
      *ppvInterface = NULL;
      return E_NOINTERFACE;
    }
  }

  // IMMNotificationClient: flag a change whenever the default render device moves.
  HRESULT STDMETHODCALLTYPE
  OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR pwstrDeviceId) {
    if (flow == eRender) {
      default_render_device_changed_flag.store(true);
    }
    return S_OK;
  }
  HRESULT STDMETHODCALLTYPE
  OnDeviceAdded(LPCWSTR pwstrDeviceId) { return S_OK; }
  HRESULT STDMETHODCALLTYPE
  OnDeviceRemoved(LPCWSTR pwstrDeviceId) { return S_OK; }
  HRESULT STDMETHODCALLTYPE
  OnDeviceStateChanged(
    LPCWSTR pwstrDeviceId,
    DWORD dwNewState) { return S_OK; }
  HRESULT STDMETHODCALLTYPE
  OnPropertyValueChanged(
    LPCWSTR pwstrDeviceId,
    const PROPERTYKEY key) { return S_OK; }

  /**
   * @brief Checks if the default rendering device changed and resets the change flag
   * @return `true` if the device changed since last call
   */
  bool
  check_default_render_device_changed() {
    return default_render_device_changed_flag.exchange(false);
  }

private:
  // Fix: explicitly initialize to false. std::atomic's default constructor leaves
  // the value indeterminate prior to C++20, so the first check could spuriously
  // report a device change (or read an uninitialized value).
  std::atomic_bool default_render_device_changed_flag { false };
};
// WASAPI loopback capture of the default render device. Event-driven shared-mode
// capture; samples are buffered internally and handed out in fixed-size chunks
// via sample(). Reinitializes (capture_e::reinit) when the default device changes.
class mic_wasapi_t: public mic_t {
public:
  // Fill sample_out with exactly sample_out.size() float samples, pulling more
  // data from WASAPI as needed. Returns ok, timeout, reinit or error.
  capture_e
  sample(std::vector<float> &sample_out) override {
    auto sample_size = sample_out.size();
    // Refill the sample buffer if needed
    while (sample_buf_pos - std::begin(sample_buf) < sample_size) {
      auto capture_result = _fill_buffer();
      if (capture_result != capture_e::ok) {
        return capture_result;
      }
    }
    // Fill the output buffer with samples
    std::copy_n(std::begin(sample_buf), sample_size, std::begin(sample_out));
    // Move any excess samples to the front of the buffer
    std::move(&sample_buf[sample_size], sample_buf_pos, std::begin(sample_buf));
    sample_buf_pos -= sample_size;
    return capture_e::ok;
  }

  // Set up COM objects, pick a capture format matching channels_out, size the
  // internal buffer, and start the stream.
  // Returns 0 on success, -1 on any failure.
  // NOTE(review): sample_rate is currently unused here — capture is fixed at
  // 48 kHz by the formats table; verify against callers before relying on it.
  int
  init(std::uint32_t sample_rate, std::uint32_t frame_size, std::uint32_t channels_out) {
    // Auto-reset event signaled by WASAPI whenever capture data is available.
    audio_event.reset(CreateEventA(nullptr, FALSE, FALSE, nullptr));
    if (!audio_event) {
      BOOST_LOG(error) << "Couldn't create Event handle"sv;
      return -1;
    }
    HRESULT status;
    status = CoCreateInstance(
      CLSID_MMDeviceEnumerator,
      nullptr,
      CLSCTX_ALL,
      IID_IMMDeviceEnumerator,
      (void **) &device_enum);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Couldn't create Device Enumerator [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    // Listen for default-device changes so _fill_buffer() can request a reinit.
    status = device_enum->RegisterEndpointNotificationCallback(&endpt_notification);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Couldn't register endpoint notification [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    auto device = default_device(device_enum);
    if (!device) {
      return -1;
    }
    // Try each known layout whose channel count matches the request.
    for (const auto &format : formats) {
      if (format.channel_count != channels_out) {
        BOOST_LOG(debug) << "Skipping audio format ["sv << format.name << "] with channel count ["sv
                         << format.channel_count << " != "sv << channels_out << ']';
        continue;
      }
      BOOST_LOG(debug) << "Trying audio format ["sv << format.name << ']';
      audio_client = make_audio_client(device, format);
      if (audio_client) {
        BOOST_LOG(debug) << "Found audio format ["sv << format.name << ']';
        channels = channels_out;
        break;
      }
    }
    if (!audio_client) {
      BOOST_LOG(error) << "Couldn't find supported format for audio"sv;
      return -1;
    }
    // Device period (100 ns units) → ms; used as the event wait timeout.
    REFERENCE_TIME default_latency;
    audio_client->GetDevicePeriod(&default_latency, nullptr);
    default_latency_ms = default_latency / 1000;
    std::uint32_t frames;
    status = audio_client->GetBufferSize(&frames);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Couldn't acquire the number of audio frames [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    // *2 --> needs to fit double
    sample_buf = util::buffer_t<float> { std::max(frames, frame_size) * 2 * channels_out };
    sample_buf_pos = std::begin(sample_buf);
    status = audio_client->GetService(IID_IAudioCaptureClient, (void **) &audio_capture);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Couldn't initialize audio capture client [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    status = audio_client->SetEventHandle(audio_event.get());
    if (FAILED(status)) {
      BOOST_LOG(error) << "Couldn't set event handle [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    {
      // Boost this thread's scheduling priority via MMCSS; failure is non-fatal.
      DWORD task_index = 0;
      mmcss_task_handle = AvSetMmThreadCharacteristics("Pro Audio", &task_index);
      if (!mmcss_task_handle) {
        BOOST_LOG(error) << "Couldn't associate audio capture thread with Pro Audio MMCSS task [0x" << util::hex(GetLastError()).to_string_view() << ']';
      }
    }
    status = audio_client->Start();
    if (FAILED(status)) {
      BOOST_LOG(error) << "Couldn't start recording [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    return 0;
  }

  // Stop the stream, unregister the endpoint callback, and revert MMCSS priority.
  ~mic_wasapi_t() override {
    if (device_enum) {
      device_enum->UnregisterEndpointNotificationCallback(&endpt_notification);
    }
    if (audio_client) {
      audio_client->Stop();
    }
    if (mmcss_task_handle) {
      AvRevertMmThreadCharacteristics(mmcss_task_handle);
    }
  }

private:
  // Wait for the capture event, then drain all pending WASAPI packets into
  // sample_buf. Returns reinit when the device changed/was invalidated.
  capture_e
  _fill_buffer() {
    HRESULT status;
    // Total number of samples
    struct sample_aligned_t {
      std::uint32_t uninitialized;
      float *samples;
    } sample_aligned;
    // number of samples / number of channels
    struct block_aligned_t {
      std::uint32_t audio_sample_size;
    } block_aligned;
    // Check if the default audio device has changed
    if (endpt_notification.check_default_render_device_changed()) {
      // Invoke the audio_control_t's callback if it wants one
      if (default_endpt_changed_cb) {
        (*default_endpt_changed_cb)();
      }
      // Reinitialize to pick up the new default device
      return capture_e::reinit;
    }
    status = WaitForSingleObjectEx(audio_event.get(), default_latency_ms, FALSE);
    switch (status) {
      case WAIT_OBJECT_0:
        break;
      case WAIT_TIMEOUT:
        return capture_e::timeout;
      default:
        BOOST_LOG(error) << "Couldn't wait for audio event: [0x"sv << util::hex(status).to_string_view() << ']';
        return capture_e::error;
    }
    // Drain every pending packet before returning.
    std::uint32_t packet_size {};
    for (
      status = audio_capture->GetNextPacketSize(&packet_size);
      SUCCEEDED(status) && packet_size > 0;
      status = audio_capture->GetNextPacketSize(&packet_size)) {
      DWORD buffer_flags;
      status = audio_capture->GetBuffer(
        (BYTE **) &sample_aligned.samples,
        &block_aligned.audio_sample_size,
        &buffer_flags,
        nullptr, nullptr);
      switch (status) {
        case S_OK:
          break;
        case AUDCLNT_E_DEVICE_INVALIDATED:
          return capture_e::reinit;
        default:
          BOOST_LOG(error) << "Couldn't capture audio [0x"sv << util::hex(status).to_string_view() << ']';
          return capture_e::error;
      }
      if (buffer_flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) {
        BOOST_LOG(debug) << "Audio capture signaled buffer discontinuity";
      }
      // Clamp to the free space left in sample_buf; excess data is dropped.
      sample_aligned.uninitialized = std::end(sample_buf) - sample_buf_pos;
      auto n = std::min(sample_aligned.uninitialized, block_aligned.audio_sample_size * channels);
      if (n < block_aligned.audio_sample_size * channels) {
        BOOST_LOG(warning) << "Audio capture buffer overflow";
      }
      // SILENT means the buffer content is undefined; write zeros instead.
      if (buffer_flags & AUDCLNT_BUFFERFLAGS_SILENT) {
        std::fill_n(sample_buf_pos, n, 0);
      }
      else {
        std::copy_n(sample_aligned.samples, n, sample_buf_pos);
      }
      sample_buf_pos += n;
      audio_capture->ReleaseBuffer(block_aligned.audio_sample_size);
    }
    if (status == AUDCLNT_E_DEVICE_INVALIDATED) {
      return capture_e::reinit;
    }
    if (FAILED(status)) {
      return capture_e::error;
    }
    return capture_e::ok;
  }

public:
  // Event signaled by WASAPI when capture data is ready.
  handle_t audio_event;
  device_enum_t device_enum;
  device_t device;
  audio_client_t audio_client;
  audio_capture_t audio_capture;
  // Tracks default-render-device changes between _fill_buffer() calls.
  audio_notification_t endpt_notification;
  // Optional hook invoked before requesting a reinit on device change.
  std::optional<std::function<void()>> default_endpt_changed_cb;
  // Device period in milliseconds; doubles as the event wait timeout.
  REFERENCE_TIME default_latency_ms;
  // Interleaved float staging buffer; sample_buf_pos points past the last
  // valid sample.
  util::buffer_t<float> sample_buf;
  float *sample_buf_pos;
  int channels;
  // MMCSS task handle to revert thread priority on destruction (may be NULL).
  HANDLE mmcss_task_handle = NULL;
};
// Windows implementation of the platform audio-control interface: enumerates
// sinks, switches the default endpoint via the undocumented IPolicyConfig API,
// manages virtual (Steam Streaming Speakers) sinks, and creates capture objects.
//
// Fixes in this revision:
//  - reset_default_device(): `GetId(&current_default_id)` had been corrupted
//    into an HTML entity (`¤t_default_id`), which does not compile.
//  - find_device_id(): loop counter is now UINT to match GetCount()'s output.
class audio_control_t: public ::platf::audio_control_t {
public:
  // Report the current default device (host) and, if present, the virtual sink
  // names for each supported channel layout.
  std::optional<sink_t>
  sink_info() override {
    sink_t sink;
    // Fill host sink name with the device_id of the current default audio device.
    {
      auto device = default_device(device_enum);
      if (!device) {
        return std::nullopt;
      }
      audio::wstring_t id;
      device->GetId(&id);
      sink.host = to_utf8(id.get());
    }
    // Prepare to search for the device_id of the virtual audio sink device,
    // this device can be either user-configured or
    // the Steam Streaming Speakers we use by default.
    match_fields_list_t match_list;
    if (config::audio.virtual_sink.empty()) {
      match_list = match_steam_speakers();
    }
    else {
      match_list = match_all_fields(from_utf8(config::audio.virtual_sink));
    }
    // Search for the virtual audio sink device currently present in the system.
    auto matched = find_device_id(match_list);
    if (matched) {
      // Prepare to fill virtual audio sink names with device_id.
      auto device_id = to_utf8(matched->second);
      // Also prepend format name (basically channel layout at the moment)
      // because we don't want to extend the platform interface.
      sink.null = std::make_optional(sink_t::null_t {
        "virtual-"s + formats[0].name + device_id,
        "virtual-"s + formats[1].name + device_id,
        "virtual-"s + formats[2].name + device_id,
      });
    }
    else if (!config::audio.virtual_sink.empty()) {
      BOOST_LOG(warning) << "Couldn't find the specified virtual audio sink " << config::audio.virtual_sink;
    }
    return sink;
  }

  /**
   * @brief Extract virtual audio sink information possibly encoded in the sink name.
   * @param sink The sink name
   * @return A pair of device_id and format reference if the sink name matches
   *         our naming scheme for virtual audio sinks, `std::nullopt` otherwise.
   */
  std::optional<std::pair<std::wstring, std::reference_wrapper<const format_t>>>
  extract_virtual_sink_info(const std::string &sink) {
    // Encoding format:
    // [virtual-(format name)]device_id
    std::string current = sink;
    auto prefix = "virtual-"sv;
    if (current.find(prefix) == 0) {
      current = current.substr(prefix.size(), current.size() - prefix.size());
      for (const auto &format : formats) {
        auto &name = format.name;
        if (current.find(name) == 0) {
          auto device_id = from_utf8(current.substr(name.size(), current.size() - name.size()));
          return std::make_pair(device_id, std::reference_wrapper(format));
        }
      }
    }
    return std::nullopt;
  }

  // Create a loopback capture object for the current default device.
  // For virtual sinks, also installs a callback that reasserts the sink if the
  // default device changes out from under us.
  std::unique_ptr<mic_t>
  microphone(const std::uint8_t *mapping, int channels, std::uint32_t sample_rate, std::uint32_t frame_size) override {
    auto mic = std::make_unique<mic_wasapi_t>();
    if (mic->init(sample_rate, frame_size, channels)) {
      return nullptr;
    }
    // If this is a virtual sink, set a callback that will change the sink back if it's changed
    auto virtual_sink_info = extract_virtual_sink_info(assigned_sink);
    if (virtual_sink_info) {
      mic->default_endpt_changed_cb = [this] {
        BOOST_LOG(info) << "Resetting sink to ["sv << assigned_sink << "] after default changed";
        set_sink(assigned_sink);
      };
    }
    return mic;
  }

  /**
   * If the requested sink is a virtual sink, meaning no speakers attached to
   * the host, then we can seamlessly set the format to stereo and surround sound.
   *
   * Any virtual sink detected will be prefixed by:
   *    virtual-(format name)
   * If it doesn't contain that prefix, then the format will not be changed
   *
   * @return The device_id of the resolved sink, or std::nullopt on failure.
   */
  std::optional<std::wstring>
  set_format(const std::string &sink) {
    if (sink.empty()) {
      return std::nullopt;
    }
    auto virtual_sink_info = extract_virtual_sink_info(sink);
    if (!virtual_sink_info) {
      // Sink name does not begin with virtual-(format name), hence it's not a virtual sink
      // and we don't want to change playback format of the corresponding device.
      // Also need to perform matching, sink name is not necessarily device_id in this case.
      auto matched = find_device_id(match_all_fields(from_utf8(sink)));
      if (matched) {
        return matched->second;
      }
      else {
        BOOST_LOG(error) << "Couldn't find audio sink " << sink;
        return std::nullopt;
      }
    }
    auto &device_id = virtual_sink_info->first;
    auto &waveformats = virtual_sink_info->second.get().virtual_sink_waveformats;
    for (const auto &waveformat : waveformats) {
      // We're using completely undocumented and unlisted API,
      // better not pass objects without copying them first.
      auto device_id_copy = device_id;
      auto waveformat_copy = waveformat;
      auto waveformat_copy_pointer = reinterpret_cast<WAVEFORMATEX *>(&waveformat_copy);
      WAVEFORMATEXTENSIBLE p {};
      if (SUCCEEDED(policy->SetDeviceFormat(device_id_copy.c_str(), waveformat_copy_pointer, (WAVEFORMATEX *) &p))) {
        BOOST_LOG(info) << "Changed virtual audio sink format to " << logging::bracket(waveformat_to_pretty_string(waveformat));
        return device_id;
      }
    }
    BOOST_LOG(error) << "Couldn't set virtual audio sink waveformat";
    return std::nullopt;
  }

  // Make `sink` the default endpoint for every ERole.
  // Returns the number of roles that could not be assigned (0 on full success).
  int
  set_sink(const std::string &sink) override {
    auto device_id = set_format(sink);
    if (!device_id) {
      return -1;
    }
    int failure {};
    for (int x = 0; x < (int) ERole_enum_count; ++x) {
      auto status = policy->SetDefaultEndpoint(device_id->c_str(), (ERole) x);
      if (status) {
        // Depending on the format of the string, we could get either of these errors
        if (status == HRESULT_FROM_WIN32(ERROR_NOT_FOUND) || status == E_INVALIDARG) {
          BOOST_LOG(warning) << "Audio sink not found: "sv << sink;
        }
        else {
          BOOST_LOG(warning) << "Couldn't set ["sv << sink << "] to role ["sv << x << "]: 0x"sv << util::hex(status).to_string_view();
        }
        ++failure;
      }
    }
    // Remember the assigned sink name, so we have it for later if we need to set it
    // back after another application changes it
    if (!failure) {
      assigned_sink = sink;
    }
    return failure;
  }

  enum class match_field_e {
    device_id,  ///< Match device_id
    device_friendly_name,  ///< Match endpoint friendly name
    adapter_friendly_name,  ///< Match adapter friendly name
    device_description,  ///< Match endpoint description
  };

  using match_fields_list_t = std::vector<std::pair<match_field_e, std::wstring>>;
  using matched_field_t = std::pair<match_field_e, std::wstring>;

  // Match list that identifies the Steam Streaming Speakers virtual device.
  audio_control_t::match_fields_list_t
  match_steam_speakers() {
    return {
      { match_field_e::adapter_friendly_name, L"Steam Streaming Speakers" }
    };
  }

  // Match list that tries every supported field against a single name.
  audio_control_t::match_fields_list_t
  match_all_fields(const std::wstring &name) {
    return {
      { match_field_e::device_id, name },  // {0.0.0.00000000}.{29dd7668-45b2-4846-882d-950f55bf7eb8}
      { match_field_e::device_friendly_name, name },  // Digital Audio (S/PDIF) (High Definition Audio Device)
      { match_field_e::device_description, name },  // Digital Audio (S/PDIF)
      { match_field_e::adapter_friendly_name, name },  // High Definition Audio Device
    };
  }

  /**
   * @brief Search for currently present audio device_id using multiple match fields.
   * @param match_list Pairs of match fields and values
   * @return Optional pair of matched field and device_id
   */
  std::optional<matched_field_t>
  find_device_id(const match_fields_list_t &match_list) {
    if (match_list.empty()) {
      return std::nullopt;
    }
    collection_t collection;
    auto status = device_enum->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &collection);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Couldn't enumerate: [0x"sv << util::hex(status).to_string_view() << ']';
      return std::nullopt;
    }
    UINT count = 0;
    collection->GetCount(&count);
    // One slot per match entry; the first device that satisfies a field fills it.
    std::vector<std::wstring> matched(match_list.size());
    // Fix: UINT loop counter avoids the signed/unsigned comparison of `auto x = 0`.
    for (UINT x = 0; x < count; ++x) {
      audio::device_t device;
      collection->Item(x, &device);
      audio::wstring_t wstring_id;
      device->GetId(&wstring_id);
      std::wstring device_id = wstring_id.get();
      audio::prop_t prop;
      device->OpenPropertyStore(STGM_READ, &prop);
      prop_var_t adapter_friendly_name;
      prop_var_t device_friendly_name;
      prop_var_t device_desc;
      prop->GetValue(PKEY_Device_FriendlyName, &device_friendly_name.prop);
      prop->GetValue(PKEY_DeviceInterface_FriendlyName, &adapter_friendly_name.prop);
      prop->GetValue(PKEY_Device_DeviceDesc, &device_desc.prop);
      for (size_t i = 0; i < match_list.size(); i++) {
        if (matched[i].empty()) {
          const wchar_t *match_value = nullptr;
          switch (match_list[i].first) {
            case match_field_e::device_id:
              match_value = device_id.c_str();
              break;
            case match_field_e::device_friendly_name:
              match_value = device_friendly_name.prop.pwszVal;
              break;
            case match_field_e::adapter_friendly_name:
              match_value = adapter_friendly_name.prop.pwszVal;
              break;
            case match_field_e::device_description:
              match_value = device_desc.prop.pwszVal;
              break;
          }
          if (match_value && std::wcscmp(match_value, match_list[i].second.c_str()) == 0) {
            matched[i] = device_id;
          }
        }
      }
    }
    // Return the highest-priority (earliest) field that matched anything.
    for (size_t i = 0; i < match_list.size(); i++) {
      if (!matched[i].empty()) {
        return matched_field_t(match_list[i].first, matched[i]);
      }
    }
    return std::nullopt;
  }

  /**
   * @brief Resets the default audio device from Steam Streaming Speakers.
   */
  void
  reset_default_device() {
    auto matched_steam = find_device_id(match_steam_speakers());
    if (!matched_steam) {
      return;
    }
    auto steam_device_id = matched_steam->second;
    {
      // Get the current default audio device (if present)
      auto current_default_dev = default_device(device_enum);
      if (!current_default_dev) {
        return;
      }
      audio::wstring_t current_default_id;
      // Fix: restore the `&` that had been mangled into an HTML entity.
      current_default_dev->GetId(&current_default_id);
      // If Steam Streaming Speakers are already not default, we're done.
      if (steam_device_id != current_default_id.get()) {
        return;
      }
    }
    // Disable the Steam Streaming Speakers temporarily to allow the OS to pick a new default.
    auto hr = policy->SetEndpointVisibility(steam_device_id.c_str(), FALSE);
    if (FAILED(hr)) {
      BOOST_LOG(warning) << "Failed to disable Steam audio device: "sv << util::hex(hr).to_string_view();
      return;
    }
    // Get the newly selected default audio device
    auto new_default_dev = default_device(device_enum);
    // Enable the Steam Streaming Speakers again
    hr = policy->SetEndpointVisibility(steam_device_id.c_str(), TRUE);
    if (FAILED(hr)) {
      BOOST_LOG(warning) << "Failed to enable Steam audio device: "sv << util::hex(hr).to_string_view();
      return;
    }
    // If there's now no audio device, the Steam Streaming Speakers were the only device available.
    // There's no other device to set as the default, so just return.
    if (!new_default_dev) {
      return;
    }
    audio::wstring_t new_default_id;
    new_default_dev->GetId(&new_default_id);
    // Set the new default audio device
    for (int x = 0; x < (int) ERole_enum_count; ++x) {
      policy->SetDefaultEndpoint(new_default_id.get(), (ERole) x);
    }
    BOOST_LOG(info) << "Successfully reset default audio device"sv;
  }

  /**
   * @brief Installs the Steam Streaming Speakers driver, if present.
   * @return `true` if installation was successful.
   */
  bool
  install_steam_audio_drivers() {
#ifdef STEAM_DRIVER_SUBDIR
    // MinGW's libnewdev.a is missing DiInstallDriverW() even though the headers have it,
    // so we have to load it at runtime. It's Vista or later, so it will always be available.
    auto newdev = LoadLibraryExW(L"newdev.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
    if (!newdev) {
      BOOST_LOG(error) << "newdev.dll failed to load"sv;
      return false;
    }
    auto fg = util::fail_guard([newdev]() {
      FreeLibrary(newdev);
    });
    auto fn_DiInstallDriverW = (decltype(DiInstallDriverW) *) GetProcAddress(newdev, "DiInstallDriverW");
    if (!fn_DiInstallDriverW) {
      BOOST_LOG(error) << "DiInstallDriverW() is missing"sv;
      return false;
    }
    // Get the current default audio device (if present)
    auto old_default_dev = default_device(device_enum);
    // Install the Steam Streaming Speakers driver
    WCHAR driver_path[MAX_PATH] = {};
    ExpandEnvironmentStringsW(STEAM_AUDIO_DRIVER_PATH, driver_path, ARRAYSIZE(driver_path));
    if (fn_DiInstallDriverW(nullptr, driver_path, 0, nullptr)) {
      BOOST_LOG(info) << "Successfully installed Steam Streaming Speakers"sv;
      // Wait for 5 seconds to allow the audio subsystem to reconfigure things before
      // modifying the default audio device or enumerating devices again.
      Sleep(5000);
      // If there was a previous default device, restore that original device as the
      // default output device just in case installing the new one changed it.
      if (old_default_dev) {
        audio::wstring_t old_default_id;
        old_default_dev->GetId(&old_default_id);
        for (int x = 0; x < (int) ERole_enum_count; ++x) {
          policy->SetDefaultEndpoint(old_default_id.get(), (ERole) x);
        }
      }
      return true;
    }
    else {
      auto err = GetLastError();
      switch (err) {
        case ERROR_ACCESS_DENIED:
          BOOST_LOG(warning) << "Administrator privileges are required to install Steam Streaming Speakers"sv;
          break;
        case ERROR_FILE_NOT_FOUND:
        case ERROR_PATH_NOT_FOUND:
          BOOST_LOG(info) << "Steam audio drivers not found. This is expected if you don't have Steam installed."sv;
          break;
        default:
          BOOST_LOG(warning) << "Failed to install Steam audio drivers: "sv << err;
          break;
      }
      return false;
    }
#else
    BOOST_LOG(warning) << "Unable to install Steam Streaming Speakers on unknown architecture"sv;
    return false;
#endif
  }

  // Create the policy-config object and device enumerator.
  // Returns 0 on success, -1 on failure.
  int
  init() {
    auto status = CoCreateInstance(
      CLSID_CPolicyConfigClient,
      nullptr,
      CLSCTX_ALL,
      IID_IPolicyConfig,
      (void **) &policy);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Couldn't create audio policy config: [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    status = CoCreateInstance(
      CLSID_MMDeviceEnumerator,
      nullptr,
      CLSCTX_ALL,
      IID_IMMDeviceEnumerator,
      (void **) &device_enum);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Couldn't create Device Enumerator: [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    return 0;
  }

  ~audio_control_t() override {}

  // Undocumented policy-config interface used to switch defaults/formats.
  policy_t policy;
  audio::device_enum_t device_enum;
  // Last sink successfully assigned via set_sink(); reasserted on device change.
  std::string assigned_sink;
};
} // namespace platf::audio
namespace platf {
// It's not big enough to justify it's own source file :/
namespace dxgi {
int
init();
}
/**
 * @brief Create and initialize the Windows audio-control object.
 * @return The control object, or nullptr if initialization failed.
 */
std::unique_ptr<audio_control_t>
audio_control() {
  auto ctrl = std::make_unique<audio::audio_control_t>();
  if (ctrl->init() != 0) {
    return nullptr;
  }

  // Install Steam Streaming Speakers if needed. We do this during audio_control() to ensure
  // the sink information returned includes the new Steam Streaming Speakers device.
  if (config::audio.install_steam_drivers) {
    auto steam_present = ctrl->find_device_id(ctrl->match_steam_speakers());
    if (!steam_present) {
      // This is best effort. Don't fail if it doesn't work.
      ctrl->install_steam_audio_drivers();
    }
  }

  return ctrl;
}
std::unique_ptr<deinit_t>
init() {
if (dxgi::init()) {
return nullptr;
}
// Initialize COM
auto co_init = std::make_unique<platf::audio::co_init_t>();
// If Steam Streaming Speakers are currently the default audio device,
// change the default to something else (if another device is available).
audio::audio_control_t audio_ctrl;
if (audio_ctrl.init() == 0) {
audio_ctrl.reset_default_device();
}
return co_init;
}
} // namespace platf
| 39,007
|
C++
|
.cpp
| 965
| 32.655959
| 155
| 0.631512
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,018
|
misc.cpp
|
LizardByte_Sunshine/src/platform/windows/misc.cpp
|
/**
* @file src/platform/windows/misc.cpp
* @brief Miscellaneous definitions for Windows.
*/
#include <csignal>
#include <filesystem>
#include <iomanip>
#include <set>
#include <sstream>
#include <boost/algorithm/string.hpp>
#include <boost/asio/ip/address.hpp>
#include <boost/process/v1.hpp>
#include <boost/program_options/parsers.hpp>
// prevent clang format from "optimizing" the header include order
// clang-format off
#include <dwmapi.h>
#include <iphlpapi.h>
#include <iterator>
#include <timeapi.h>
#include <userenv.h>
#include <winsock2.h>
#include <windows.h>
#include <winuser.h>
#include <wlanapi.h>
#include <ws2tcpip.h>
#include <wtsapi32.h>
#include <sddl.h>
// clang-format on
// Boost overrides NTDDI_VERSION, so we re-override it here
#undef NTDDI_VERSION
#define NTDDI_VERSION NTDDI_WIN10
#include <Shlwapi.h>
#include "misc.h"
#include "src/entry_handler.h"
#include "src/globals.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include <iterator>
#include "nvprefs/nvprefs_interface.h"
// UDP_SEND_MSG_SIZE was added in the Windows 10 20H1 SDK
#ifndef UDP_SEND_MSG_SIZE
#define UDP_SEND_MSG_SIZE 2
#endif
// PROC_THREAD_ATTRIBUTE_JOB_LIST is currently missing from MinGW headers
#ifndef PROC_THREAD_ATTRIBUTE_JOB_LIST
#define PROC_THREAD_ATTRIBUTE_JOB_LIST ProcThreadAttributeValue(13, FALSE, TRUE, FALSE)
#endif
#include <qos2.h>
#ifndef WLAN_API_MAKE_VERSION
#define WLAN_API_MAKE_VERSION(_major, _minor) (((DWORD) (_minor)) << 16 | (_major))
#endif
#include <winternl.h>
extern "C" {
NTSTATUS NTAPI
NtSetTimerResolution(ULONG DesiredResolution, BOOLEAN SetResolution, PULONG CurrentResolution);
}
namespace {
std::atomic<bool> used_nt_set_timer_resolution = false;
bool
nt_set_timer_resolution_max() {
ULONG minimum, maximum, current;
if (!NT_SUCCESS(NtQueryTimerResolution(&minimum, &maximum, ¤t)) ||
!NT_SUCCESS(NtSetTimerResolution(maximum, TRUE, ¤t))) {
return false;
}
return true;
}
bool
nt_set_timer_resolution_min() {
ULONG minimum, maximum, current;
if (!NT_SUCCESS(NtQueryTimerResolution(&minimum, &maximum, ¤t)) ||
!NT_SUCCESS(NtSetTimerResolution(minimum, TRUE, ¤t))) {
return false;
}
return true;
}
} // namespace
namespace bp = boost::process;
using namespace std::literals;
namespace platf {
// Owning smart-pointer type for adapter lists returned by get_adapteraddrs().
using adapteraddrs_t = util::c_ptr<IP_ADAPTER_ADDRESSES>;

// MouseKeys accessibility state — presumably saved here so it can be
// restored elsewhere in this file (TODO confirm against the rest of the file).
bool enabled_mouse_keys = false;
MOUSEKEYS previous_mouse_keys_state;

// QoS (qwave) handle and function pointers; nullptr until resolved —
// presumably loaded dynamically elsewhere in this file.
HANDLE qos_handle = nullptr;

decltype(QOSCreateHandle) *fn_QOSCreateHandle = nullptr;
decltype(QOSAddSocketToFlow) *fn_QOSAddSocketToFlow = nullptr;
decltype(QOSRemoveSocketFromFlow) *fn_QOSRemoveSocketFromFlow = nullptr;

// WLAN (wlanapi) handle and function pointers; same runtime-resolution pattern.
HANDLE wlan_handle = nullptr;

decltype(WlanOpenHandle) *fn_WlanOpenHandle = nullptr;
decltype(WlanCloseHandle) *fn_WlanCloseHandle = nullptr;
decltype(WlanFreeMemory) *fn_WlanFreeMemory = nullptr;
decltype(WlanEnumInterfaces) *fn_WlanEnumInterfaces = nullptr;
decltype(WlanSetInterface) *fn_WlanSetInterface = nullptr;
std::filesystem::path
appdata() {
WCHAR sunshine_path[MAX_PATH];
GetModuleFileNameW(NULL, sunshine_path, _countof(sunshine_path));
return std::filesystem::path { sunshine_path }.remove_filename() / L"config"sv;
}
// Convert a sockaddr (IPv4 or IPv6) to its textual address form.
// Unknown address families yield an empty string.
std::string
from_sockaddr(const sockaddr *const socket_address) {
  // Buffer sized for the larger (IPv6) representation; zero-filled so an
  // unrecognized family produces an empty string.
  char addr_buf[INET6_ADDRSTRLEN] = {};

  switch (socket_address->sa_family) {
    case AF_INET6:
      inet_ntop(AF_INET6, &((const sockaddr_in6 *) socket_address)->sin6_addr, addr_buf, INET6_ADDRSTRLEN);
      break;
    case AF_INET:
      inet_ntop(AF_INET, &((const sockaddr_in *) socket_address)->sin_addr, addr_buf, INET_ADDRSTRLEN);
      break;
  }

  return std::string { addr_buf };
}
// Convert a sockaddr (IPv4 or IPv6) to a (port, address-string) pair.
// NOTE(review): the port is returned exactly as stored in the sockaddr,
// i.e. in network byte order (no ntohs) — callers appear to rely on this.
std::pair<std::uint16_t, std::string>
from_sockaddr_ex(const sockaddr *const ip_addr) {
  char addr_buf[INET6_ADDRSTRLEN] = {};
  std::uint16_t port = 0;

  switch (ip_addr->sa_family) {
    case AF_INET6: {
      auto v6 = (const sockaddr_in6 *) ip_addr;
      inet_ntop(AF_INET6, &v6->sin6_addr, addr_buf, INET6_ADDRSTRLEN);
      port = v6->sin6_port;
      break;
    }
    case AF_INET: {
      auto v4 = (const sockaddr_in *) ip_addr;
      inet_ntop(AF_INET, &v4->sin_addr, addr_buf, INET_ADDRSTRLEN);
      port = v4->sin_port;
      break;
    }
  }

  return { port, std::string { addr_buf } };
}
// Fetch the full adapter address list from the OS.
// GetAdaptersAddresses() reports the required buffer size via
// ERROR_BUFFER_OVERFLOW, so we loop: each overflow updates `size` and we
// reallocate until the call succeeds (the needed size can grow between calls).
adapteraddrs_t
get_adapteraddrs() {
  adapteraddrs_t info { nullptr };
  ULONG size = 0;

  while (GetAdaptersAddresses(AF_UNSPEC, 0, nullptr, info.get(), &size) == ERROR_BUFFER_OVERFLOW) {
    // NOTE(review): malloc() result is unchecked; on allocation failure the
    // next call would see a null buffer and overflow again — confirm intended.
    info.reset((PIP_ADAPTER_ADDRESSES) malloc(size));
  }

  return info;
}
std::string
get_mac_address(const std::string_view &address) {
adapteraddrs_t info = get_adapteraddrs();
for (auto adapter_pos = info.get(); adapter_pos != nullptr; adapter_pos = adapter_pos->Next) {
for (auto addr_pos = adapter_pos->FirstUnicastAddress; addr_pos != nullptr; addr_pos = addr_pos->Next) {
if (adapter_pos->PhysicalAddressLength != 0 && address == from_sockaddr(addr_pos->Address.lpSockaddr)) {
std::stringstream mac_addr;
mac_addr << std::hex;
for (int i = 0; i < adapter_pos->PhysicalAddressLength; i++) {
if (i > 0) {
mac_addr << ':';
}
mac_addr << std::setw(2) << std::setfill('0') << (int) adapter_pos->PhysicalAddress[i];
}
return mac_addr.str();
}
}
}
BOOST_LOG(warning) << "Unable to find MAC address for "sv << address;
return "00:00:00:00:00:00"s;
}
// Attach the calling thread to the current input desktop.
// Returns the desktop handle, or nullptr if the desktop could not be opened.
HDESK
syncThreadDesktop() {
  auto hDesk = OpenInputDesktop(DF_ALLOWOTHERACCOUNTHOOK, FALSE, GENERIC_ALL);
  if (!hDesk) {
    auto err = GetLastError();
    BOOST_LOG(error) << "Failed to Open Input Desktop [0x"sv << util::hex(err).to_string_view() << ']';

    return nullptr;
  }

  // A SetThreadDesktop failure is logged but not treated as fatal here.
  if (!SetThreadDesktop(hDesk)) {
    auto err = GetLastError();
    BOOST_LOG(error) << "Failed to sync desktop to thread [0x"sv << util::hex(err).to_string_view() << ']';
  }

  // NOTE(review): the handle is closed here yet still returned to the
  // caller — presumably callers only use it as a success/identity marker
  // rather than a live handle; confirm before dereferencing it.
  CloseDesktop(hDesk);

  return hDesk;
}
// Log a Windows status/HRESULT together with its system-provided
// human-readable message, prefixed by the caller-supplied context string.
void
print_status(const std::string_view &prefix, HRESULT status) {
  char err_string[1024];

  // Ask the system message table for the text belonging to this status code.
  DWORD bytes = FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
    nullptr,
    status,
    MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
    err_string,
    sizeof(err_string),
    nullptr);

  // If FormatMessage failed, bytes is 0 and the logged message is empty.
  BOOST_LOG(error) << prefix << ": "sv << std::string_view { err_string, bytes };
}
// Check whether the given token belongs to a member of the local
// Administrators group. Returns false on any API failure.
bool
IsUserAdmin(HANDLE user_token) {
  WINBOOL ret;
  SID_IDENTIFIER_AUTHORITY NtAuthority = SECURITY_NT_AUTHORITY;
  PSID AdministratorsGroup;
  // Build the well-known SID for BUILTIN\Administrators.
  ret = AllocateAndInitializeSid(
    &NtAuthority,
    2,
    SECURITY_BUILTIN_DOMAIN_RID,
    DOMAIN_ALIAS_RID_ADMINS,
    0, 0, 0, 0, 0, 0,
    &AdministratorsGroup);
  if (ret) {
    // `ret` doubles as the membership result output here.
    if (!CheckTokenMembership(user_token, AdministratorsGroup, &ret)) {
      ret = false;
      BOOST_LOG(error) << "Failed to verify token membership for administrative access: " << GetLastError();
    }
    FreeSid(AdministratorsGroup);
  }
  else {
    BOOST_LOG(error) << "Unable to allocate SID to check administrative access: " << GetLastError();
  }

  return ret;
}
/**
 * @brief Obtain the current sessions user's primary token with elevated privileges.
 * @param elevated When `true`, attempt to return the user's linked administrator token (if one exists).
 * @return The user's token. If user has admin capability it will be elevated, otherwise it will be a limited token. On error, `nullptr`.
 * @note The caller owns the returned handle and must close it with `CloseHandle()`.
 */
HANDLE
retrieve_users_token(bool elevated) {
  DWORD consoleSessionId;
  HANDLE userToken;
  TOKEN_ELEVATION_TYPE elevationType;
  DWORD dwSize;

  // Get the session ID of the active console session
  consoleSessionId = WTSGetActiveConsoleSessionId();
  if (0xFFFFFFFF == consoleSessionId) {
    // If there is no active console session, log a warning and return null
    BOOST_LOG(warning) << "There isn't an active user session, therefore it is not possible to execute commands under the users profile.";
    return nullptr;
  }

  // Get the user token for the active console session
  if (!WTSQueryUserToken(consoleSessionId, &userToken)) {
    BOOST_LOG(debug) << "QueryUserToken failed, this would prevent commands from launching under the users profile.";
    return nullptr;
  }

  // We need to know if this is an elevated token or not.
  // Get the elevation type of the user token
  // Elevation - Default: User is not an admin, UAC enabled/disabled does not matter.
  // Elevation - Limited: User is an admin, has UAC enabled.
  // Elevation - Full: User is an admin, has UAC disabled.
  if (!GetTokenInformation(userToken, TokenElevationType, &elevationType, sizeof(TOKEN_ELEVATION_TYPE), &dwSize)) {
    BOOST_LOG(debug) << "Retrieving token information failed: " << GetLastError();
    CloseHandle(userToken);
    return nullptr;
  }

  // User is currently not an administrator
  // The documentation for this scenario is conflicting, so we'll double check to see if user is actually an admin.
  if (elevated && (elevationType == TokenElevationTypeDefault && !IsUserAdmin(userToken))) {
    // We don't have to strip the token or do anything here, but let's give the user a warning so they're aware what is happening.
    BOOST_LOG(warning) << "This command requires elevation and the current user account logged in does not have administrator rights. "
                       << "For security reasons Sunshine will retain the same access level as the current user and will not elevate it.";
  }

  // User has a limited token, this means they have UAC enabled and is an Administrator
  if (elevated && elevationType == TokenElevationTypeLimited) {
    TOKEN_LINKED_TOKEN linkedToken;
    // Retrieve the administrator token that is linked to the limited token
    if (!GetTokenInformation(userToken, TokenLinkedToken, reinterpret_cast<void *>(&linkedToken), sizeof(TOKEN_LINKED_TOKEN), &dwSize)) {
      // If the retrieval failed, log an error message and return null
      BOOST_LOG(error) << "Retrieving linked token information failed: " << GetLastError();
      CloseHandle(userToken);

      // There is no scenario where this should be hit, except for an actual error.
      return nullptr;
    }

    // Since we need the elevated token, we'll replace it with their administrative token.
    CloseHandle(userToken);
    userToken = linkedToken.LinkedToken;
  }

  // We don't need to do anything for TokenElevationTypeFull users here, because they're already elevated.
  return userToken;
}
/**
 * @brief Merge the target user's environment block into the given environment.
 * @param env The environment to merge into (modified in place).
 * @param shell_token Token of the user whose environment block is merged.
 * @return `true` on success, `false` if the environment block could not be created.
 */
bool
merge_user_environment_block(bp::environment &env, HANDLE shell_token) {
  // Get the target user's environment block
  PVOID env_block;
  if (!CreateEnvironmentBlock(&env_block, shell_token, FALSE)) {
    return false;
  }

  // Parse the environment block and populate env
  for (auto c = (PWCHAR) env_block; *c != UNICODE_NULL; c += wcslen(c) + 1) {
    // Environment variable entries end with a null-terminator, so std::wstring() will get an entire entry.
    std::string env_tuple = to_utf8(std::wstring { c });
    std::string env_name = env_tuple.substr(0, env_tuple.find('='));
    std::string env_val = env_tuple.substr(env_tuple.find('=') + 1);

    // Perform a case-insensitive search to see if this variable name already exists
    auto itr = std::find_if(env.cbegin(), env.cend(),
      [&](const auto &e) { return boost::iequals(e.get_name(), env_name); });
    if (itr != env.cend()) {
      // Use this existing name if it is already present to ensure we merge properly
      env_name = itr->get_name();
    }

    // For the PATH variable, we will merge the values together
    if (boost::iequals(env_name, "PATH")) {
      env[env_name] = env_val + ";" + env[env_name].to_string();
    }
    else {
      // Other variables will be superseded by those in the user's environment block
      env[env_name] = env_val;
    }
  }

  DestroyEnvironmentBlock(env_block);
  return true;
}
/**
 * @brief Check if the current process is running with system-level privileges.
 * @return `true` if the current process has system-level privileges, `false` otherwise.
 */
bool
is_running_as_system() {
  BOOL ret;
  PSID SystemSid;
  DWORD dwSize = SECURITY_MAX_SID_SIZE;

  // Allocate memory for the SID structure
  SystemSid = LocalAlloc(LMEM_FIXED, dwSize);
  if (SystemSid == nullptr) {
    BOOST_LOG(error) << "Failed to allocate memory for the SID structure: " << GetLastError();
    return false;
  }

  // Create a SID for the local system account
  ret = CreateWellKnownSid(WinLocalSystemSid, nullptr, SystemSid, &dwSize);
  if (ret) {
    // Check if the current process token contains this SID
    // (nullptr token = the calling process's own token, per CheckTokenMembership docs)
    if (!CheckTokenMembership(nullptr, SystemSid, &ret)) {
      BOOST_LOG(error) << "Failed to check token membership: " << GetLastError();
      ret = false;
    }
  }
  else {
    BOOST_LOG(error) << "Failed to create a SID for the local system account. This may happen if the system is out of memory or if the SID buffer is too small: " << GetLastError();
  }

  // Free the memory allocated for the SID structure
  LocalFree(SystemSid);
  return ret;
}
// Copy the characters of `wstr` into `env_block` at `offset`, advancing
// `offset` past the copied characters.
// Note: This does NOT append a null terminator.
void
append_string_to_environment_block(wchar_t *env_block, int &offset, const std::wstring &wstr) {
  std::copy(wstr.begin(), wstr.end(), &env_block[offset]);
  offset += wstr.length();
}
/**
 * @brief Flatten a Boost environment into a Windows environment block.
 * @param env The environment to serialize.
 * @return A wide string containing NAME=VALUE\0 entries followed by a
 *         final null terminator, suitable for CreateProcess*().
 */
std::wstring
create_environment_block(bp::environment &env) {
  // Compute the total size up front: one NAME=VALUE\0 entry per variable,
  // plus the block's trailing terminator.
  int size = 0;
  for (const auto &entry : env) {
    auto name = entry.get_name();
    auto value = entry.to_string();
    size += from_utf8(name).length() + 1 /* L'=' */ + from_utf8(value).length() + 1 /* L'\0' */;
  }

  size += 1 /* L'\0' */;

  // Use heap-backed storage instead of the previous variable-length array:
  // VLAs are a compiler extension (not standard C++), and a large user
  // environment could otherwise overflow the stack.
  std::vector<wchar_t> env_block(size);
  int offset = 0;
  for (const auto &entry : env) {
    auto name = entry.get_name();
    auto value = entry.to_string();

    // Construct the NAME=VAL\0 string
    append_string_to_environment_block(env_block.data(), offset, from_utf8(name));
    env_block[offset++] = L'=';
    append_string_to_environment_block(env_block.data(), offset, from_utf8(value));
    env_block[offset++] = L'\0';
  }

  // Append a final null terminator
  env_block[offset++] = L'\0';

  return std::wstring(env_block.data(), offset);
}
/**
 * @brief Allocate and initialize a PROC_THREAD_ATTRIBUTE_LIST on the process heap.
 * @param attribute_count Number of attributes the list must hold.
 * @return The initialized list, or `NULL` on failure.
 *         Free with `free_proc_thread_attr_list()`.
 */
LPPROC_THREAD_ATTRIBUTE_LIST
allocate_proc_thread_attr_list(DWORD attribute_count) {
  SIZE_T size;
  // This first call is expected to fail; it reports the required buffer size.
  InitializeProcThreadAttributeList(NULL, attribute_count, 0, &size);

  auto list = (LPPROC_THREAD_ATTRIBUTE_LIST) HeapAlloc(GetProcessHeap(), 0, size);
  if (list == NULL) {
    return NULL;
  }

  if (!InitializeProcThreadAttributeList(list, attribute_count, 0, &size)) {
    HeapFree(GetProcessHeap(), 0, list);
    return NULL;
  }

  return list;
}
/**
 * @brief Destroy and free an attribute list created by `allocate_proc_thread_attr_list()`.
 * @param list The attribute list to release.
 */
void
free_proc_thread_attr_list(LPPROC_THREAD_ATTRIBUTE_LIST list) {
  DeleteProcThreadAttributeList(list);
  HeapFree(GetProcessHeap(), 0, list);
}
/**
 * @brief Create a `bp::child` object from the results of launching a process.
 * @param process_launched A boolean indicating if the launch was successful.
 * @param cmd The command that was used to launch the process.
 * @param ec A reference to an `std::error_code` object that will store any error that occurred during the launch.
 * @param process_info A reference to a `PROCESS_INFORMATION` structure that contains information about the new process.
 * @return A `bp::child` object representing the new process, or an empty `bp::child` object if the launch failed.
 */
bp::child
create_boost_child_from_results(bool process_launched, const std::string &cmd, std::error_code &ec, PROCESS_INFORMATION &process_info) {
  // Use RAII to ensure the process is closed when we're done with it, even if there was an error.
  auto close_process_handles = util::fail_guard([process_launched, process_info]() {
    if (process_launched) {
      CloseHandle(process_info.hThread);
      CloseHandle(process_info.hProcess);
    }
  });

  if (ec) {
    // If there was an error, return an empty bp::child object
    return bp::child();
  }

  if (process_launched) {
    // If the launch was successful, create a new bp::child object representing the new process.
    // The child is constructed from the PID alone, so our process/thread
    // handles are no longer needed and are closed by the guard above.
    auto child = bp::child((bp::pid_t) process_info.dwProcessId);
    BOOST_LOG(info) << cmd << " running with PID "sv << child.id();
    return child;
  }
  else {
    auto winerror = GetLastError();
    BOOST_LOG(error) << "Failed to launch process: "sv << winerror;
    ec = std::make_error_code(std::errc::invalid_argument);

    // We must NOT attach the failed process here, since this case can potentially be induced by ACL
    // manipulation (denying yourself execute permission) to cause an escalation of privilege.
    // So to protect ourselves against that, we'll return an empty child process instead.
    return bp::child();
  }
}
/**
 * @brief Impersonate the current user and invoke the callback function.
 * @param user_token A handle to the user's token that was obtained from the shell.
 * @param callback A function that will be executed while impersonating the user.
 * @return Object that will store any error that occurred during the impersonation
 * @note The callback runs on the calling thread while that thread holds the
 *       impersonation token; impersonation is reverted before returning.
 */
std::error_code
impersonate_current_user(HANDLE user_token, std::function<void()> callback) {
  std::error_code ec;
  // Impersonate the user when launching the process. This will ensure that appropriate access
  // checks are done against the user token, not our SYSTEM token. It will also allow network
  // shares and mapped network drives to be used as launch targets, since those credentials
  // are stored per-user.
  if (!ImpersonateLoggedOnUser(user_token)) {
    auto winerror = GetLastError();
    // Log the failure of impersonating the user and its error code
    BOOST_LOG(error) << "Failed to impersonate user: "sv << winerror;
    ec = std::make_error_code(std::errc::permission_denied);
    return ec;
  }

  // Execute the callback function while impersonating the user
  callback();

  // End impersonation of the logged on user. If this fails (which is extremely unlikely),
  // we will be running with an unknown user token. The only safe thing to do in that case
  // is terminate ourselves.
  if (!RevertToSelf()) {
    auto winerror = GetLastError();
    // Log the failure of reverting to self and its error code
    BOOST_LOG(fatal) << "Failed to revert to self after impersonation: "sv << winerror;
    DebugBreak();
  }

  return ec;
}
/**
 * @brief Create a `STARTUPINFOEXW` structure for launching a process.
 * @param file A pointer to a `FILE` object that will be used as the standard output and error for the new process, or null if not needed.
 * @param job A job object handle to insert the new process into. This pointer must remain valid for the life of this startup info!
 * @param ec A reference to a `std::error_code` object that will store any error that occurred during the creation of the structure.
 * @return A structure that contains information about how to launch the new process.
 * @note On success, the caller must release `lpAttributeList` with
 *       `free_proc_thread_attr_list()` when done.
 */
STARTUPINFOEXW
create_startup_info(FILE *file, HANDLE *job, std::error_code &ec) {
  // Initialize a zeroed-out STARTUPINFOEXW structure and set its size
  STARTUPINFOEXW startup_info = {};
  startup_info.StartupInfo.cb = sizeof(startup_info);

  // Allocate a process attribute list with space for 2 elements
  startup_info.lpAttributeList = allocate_proc_thread_attr_list(2);
  if (startup_info.lpAttributeList == NULL) {
    // If the allocation failed, set ec to an appropriate error code and return the structure
    ec = std::make_error_code(std::errc::not_enough_memory);
    return startup_info;
  }

  if (file) {
    // If a file was provided, get its handle and use it as the standard output and error for the new process
    HANDLE log_file_handle = (HANDLE) _get_osfhandle(_fileno(file));

    // Populate std handles if the caller gave us a log file to use
    startup_info.StartupInfo.dwFlags |= STARTF_USESTDHANDLES;
    startup_info.StartupInfo.hStdInput = NULL;
    startup_info.StartupInfo.hStdOutput = log_file_handle;
    startup_info.StartupInfo.hStdError = log_file_handle;

    // Allow the log file handle to be inherited by the child process (without inheriting all of
    // our inheritable handles, such as our own log file handle created by SunshineSvc).
    //
    // Note: The value we point to here must be valid for the lifetime of the attribute list,
    // so we need to point into the STARTUPINFO instead of our log_file_variable on the stack.
    UpdateProcThreadAttribute(startup_info.lpAttributeList,
      0,
      PROC_THREAD_ATTRIBUTE_HANDLE_LIST,
      &startup_info.StartupInfo.hStdOutput,
      sizeof(startup_info.StartupInfo.hStdOutput),
      NULL,
      NULL);
  }

  if (job) {
    // Atomically insert the new process into the specified job.
    //
    // Note: The value we point to here must be valid for the lifetime of the attribute list,
    // so we take a HANDLE* instead of just a HANDLE to use the caller's stack storage.
    UpdateProcThreadAttribute(startup_info.lpAttributeList,
      0,
      PROC_THREAD_ATTRIBUTE_JOB_LIST,
      job,
      sizeof(*job),
      NULL,
      NULL);
  }

  return startup_info;
}
/**
 * @brief This function overrides HKEY_CURRENT_USER and HKEY_CLASSES_ROOT using the provided token.
 * @param token The primary token identifying the user to use, or `NULL` to restore original keys.
 * @return `true` if the override or restore operation was successful.
 * @note This changes process-wide registry state; callers serialize access
 *       (see the mutex in `resolve_command_string()`).
 */
bool
override_per_user_predefined_keys(HANDLE token) {
  HKEY user_classes_root = NULL;
  if (token) {
    auto err = RegOpenUserClassesRoot(token, 0, GENERIC_ALL, &user_classes_root);
    if (err != ERROR_SUCCESS) {
      BOOST_LOG(error) << "Failed to open classes root for target user: "sv << err;
      return false;
    }
  }
  // Guard: close the classes-root key on every exit path.
  auto close_classes_root = util::fail_guard([user_classes_root]() {
    if (user_classes_root) {
      RegCloseKey(user_classes_root);
    }
  });

  HKEY user_key = NULL;
  if (token) {
    impersonate_current_user(token, [&]() {
      // RegOpenCurrentUser() doesn't take a token. It assumes we're impersonating the desired user.
      auto err = RegOpenCurrentUser(GENERIC_ALL, &user_key);
      if (err != ERROR_SUCCESS) {
        BOOST_LOG(error) << "Failed to open user key for target user: "sv << err;
        user_key = NULL;
      }
    });
    if (!user_key) {
      return false;
    }
  }
  // Guard: close the per-user key on every exit path.
  auto close_user = util::fail_guard([user_key]() {
    if (user_key) {
      RegCloseKey(user_key);
    }
  });

  auto err = RegOverridePredefKey(HKEY_CLASSES_ROOT, user_classes_root);
  if (err != ERROR_SUCCESS) {
    BOOST_LOG(error) << "Failed to override HKEY_CLASSES_ROOT: "sv << err;
    return false;
  }

  err = RegOverridePredefKey(HKEY_CURRENT_USER, user_key);
  if (err != ERROR_SUCCESS) {
    BOOST_LOG(error) << "Failed to override HKEY_CURRENT_USER: "sv << err;
    // Roll back the classes-root override applied just above.
    RegOverridePredefKey(HKEY_CLASSES_ROOT, NULL);
    return false;
  }

  return true;
}
/**
 * @brief Quote/escape an argument according to the Windows parsing convention.
 * @param argument The raw argument to process.
 * @return An argument string suitable for use by CreateProcess().
 */
std::wstring
escape_argument(const std::wstring &argument) {
  // Fast path: nothing in the argument requires quoting or escaping.
  if (argument.find_first_of(L" \t\n\v\"") == std::wstring::npos) {
    return argument;
  }

  // The algorithm implemented here comes from a MSDN blog post:
  // https://web.archive.org/web/20120201194949/http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx
  std::wstring quoted;
  quoted.push_back(L'"');

  std::size_t pos = 0;
  while (true) {
    // Count any run of backslashes; how they're emitted depends on what follows.
    std::size_t slashes = 0;
    while (pos < argument.size() && argument[pos] == L'\\') {
      ++pos;
      ++slashes;
    }

    if (pos == argument.size()) {
      // Trailing backslashes are doubled so the closing quote stays a quote.
      quoted.append(slashes * 2, L'\\');
      break;
    }

    if (argument[pos] == L'"') {
      // Backslashes preceding a quote are doubled, plus one to escape the quote itself.
      quoted.append(slashes * 2 + 1, L'\\');
    }
    else {
      // Backslashes not followed by a quote are emitted literally.
      quoted.append(slashes, L'\\');
    }

    quoted.push_back(argument[pos]);
    ++pos;
  }

  quoted.push_back(L'"');
  return quoted;
}
/**
 * @brief Escape an argument according to cmd's parsing convention.
 * @param argument An argument already escaped by `escape_argument()`.
 * @return An argument string suitable for use by cmd.exe.
 */
std::wstring
escape_argument_for_cmd(const std::wstring &argument) {
  // Each cmd metacharacter gets a '^' escape prefix; everything else is copied as-is.
  const std::wstring metachars = L"()%!^\"<>&|";

  std::wstring escaped;
  escaped.reserve(argument.size());
  for (wchar_t ch : argument) {
    if (metachars.find(ch) != std::wstring::npos) {
      escaped.push_back(L'^');
    }
    escaped.push_back(ch);
  }

  return escaped;
}
/**
* @brief Resolve the given raw command into a proper command string for CreateProcess().
* @details This converts URLs and non-executable file paths into a runnable command like ShellExecute().
* @param raw_cmd The raw command provided by the user.
* @param working_dir The working directory for the new process.
* @param token The user token currently being impersonated or `NULL` if running as ourselves.
* @param creation_flags The creation flags for CreateProcess(), which may be modified by this function.
* @return A command string suitable for use by CreateProcess().
*/
std::wstring
resolve_command_string(const std::string &raw_cmd, const std::wstring &working_dir, HANDLE token, DWORD &creation_flags) {
  std::wstring raw_cmd_w = from_utf8(raw_cmd);

  // First, convert the given command into parts so we can get the executable/file/URL without parameters
  auto raw_cmd_parts = boost::program_options::split_winmain(raw_cmd_w);
  if (raw_cmd_parts.empty()) {
    // This is highly unexpected, but we'll just return the raw string and hope for the best.
    BOOST_LOG(warning) << "Failed to split command string: "sv << raw_cmd;
    return from_utf8(raw_cmd);
  }

  auto raw_target = raw_cmd_parts.at(0);
  std::wstring lookup_string;
  HRESULT res;

  if (PathIsURLW(raw_target.c_str())) {
    std::array<WCHAR, 128> scheme;

    DWORD out_len = scheme.size();
    res = UrlGetPartW(raw_target.c_str(), scheme.data(), &out_len, URL_PART_SCHEME, 0);
    if (res != S_OK) {
      BOOST_LOG(warning) << "Failed to extract URL scheme from URL: "sv << raw_target << " ["sv << util::hex(res).to_string_view() << ']';
      return from_utf8(raw_cmd);
    }

    // If the target is a URL, the class is found using the URL scheme (prior to and not including the ':')
    lookup_string = scheme.data();
  }
  else {
    // If the target is not a URL, assume it's a regular file path
    auto extension = PathFindExtensionW(raw_target.c_str());
    if (extension == nullptr || *extension == 0) {
      // If the file has no extension, assume it's a command and allow CreateProcess()
      // to try to find it via PATH
      return from_utf8(raw_cmd);
    }
    else if (boost::iequals(extension, L".exe")) {
      // If the file has an .exe extension, we will bypass the resolution here and
      // directly pass the unmodified command string to CreateProcess(). The argument
      // escaping rules are subtly different between CreateProcess() and ShellExecute(),
      // and we want to preserve backwards compatibility with older configs.
      return from_utf8(raw_cmd);
    }

    // For regular files, the class is found using the file extension (including the dot)
    lookup_string = extension;
  }

  std::array<WCHAR, MAX_PATH> shell_command_string;
  bool needs_cmd_escaping = false;

  {
    // Overriding these predefined keys affects process-wide state, so serialize all calls
    // to ensure the handle state is consistent while we perform the command query.
    static std::mutex per_user_key_mutex;
    auto lg = std::lock_guard(per_user_key_mutex);

    // Override HKEY_CLASSES_ROOT and HKEY_CURRENT_USER to ensure we query the correct class info
    if (!override_per_user_predefined_keys(token)) {
      return from_utf8(raw_cmd);
    }

    // Find the command string for the specified class
    DWORD out_len = shell_command_string.size();
    res = AssocQueryStringW(ASSOCF_NOTRUNCATE, ASSOCSTR_COMMAND, lookup_string.c_str(), L"open", shell_command_string.data(), &out_len);

    // In some cases (UWP apps), we might not have a command for this target. If that happens,
    // we'll have to launch via cmd.exe. This prevents proper job tracking, but that was already
    // broken for UWP apps anyway due to how they are started by Windows. Even 'start /wait'
    // doesn't work properly for UWP, so really no termination tracking seems to work at all.
    //
    // FIXME: Maybe we can improve this in the future.
    if (res == HRESULT_FROM_WIN32(ERROR_NO_ASSOCIATION)) {
      BOOST_LOG(warning) << "Using trampoline to handle target: "sv << raw_cmd;
      std::wcscpy(shell_command_string.data(), L"cmd.exe /c start \"\" /wait \"%1\" %*");
      needs_cmd_escaping = true;

      // We must suppress the console window that would otherwise appear when starting cmd.exe.
      creation_flags &= ~CREATE_NEW_CONSOLE;
      creation_flags |= CREATE_NO_WINDOW;

      res = S_OK;
    }

    // Reset per-user keys back to the original value
    override_per_user_predefined_keys(NULL);
  }

  if (res != S_OK) {
    BOOST_LOG(warning) << "Failed to query command string for raw command: "sv << raw_cmd << " ["sv << util::hex(res).to_string_view() << ']';
    return from_utf8(raw_cmd);
  }

  // Finally, construct the real command string that will be passed into CreateProcess().
  // We support common substitutions (%*, %1, %2, %L, %W, %V, etc), but there are other
  // uncommon ones that are unsupported here.
  //
  // https://web.archive.org/web/20111002101214/http://msdn.microsoft.com/en-us/library/windows/desktop/cc144101(v=vs.85).aspx
  std::wstring cmd_string { shell_command_string.data() };
  size_t match_pos = 0;
  while ((match_pos = cmd_string.find_first_of(L'%', match_pos)) != std::wstring::npos) {
    std::wstring match_replacement;

    // If no additional character exists after the match, the dangling '%' is stripped
    if (match_pos + 1 == cmd_string.size()) {
      cmd_string.erase(match_pos, 1);
      break;
    }

    // Shell command replacements are strictly '%' followed by a single non-'%' character
    // NOTE(review): std::tolower() takes an int; fine for the ASCII substitution
    // characters expected here, questionable for arbitrary wide characters — confirm.
    auto next_char = std::tolower(cmd_string.at(match_pos + 1));
    switch (next_char) {
      // Escape character
      case L'%':
        match_replacement = L'%';
        break;

      // Argument replacements
      case L'0':
      case L'1':
      case L'2':
      case L'3':
      case L'4':
      case L'5':
      case L'6':
      case L'7':
      case L'8':
      case L'9': {
        // Arguments numbers are 1-based, except for %0 which is equivalent to %1
        int index = next_char - L'0';
        if (next_char != L'0') {
          index--;
        }

        // Replace with the matching argument, or nothing if the index is invalid
        if (index < raw_cmd_parts.size()) {
          match_replacement = raw_cmd_parts.at(index);
        }
        break;
      }

      // All arguments following the target
      case L'*':
        for (int i = 1; i < raw_cmd_parts.size(); i++) {
          // Insert a space before arguments after the first one
          if (i > 1) {
            match_replacement += L' ';
          }

          // Argument escaping applies only to %*, not the single substitutions like %2
          auto escaped_argument = escape_argument(raw_cmd_parts.at(i));
          if (needs_cmd_escaping) {
            // If we're using the cmd.exe trampoline, we'll need to add additional escaping
            escaped_argument = escape_argument_for_cmd(escaped_argument);
          }
          match_replacement += escaped_argument;
        }
        break;

      // Long file path of target
      case L'l':
      case L'd':
      case L'v': {
        std::array<WCHAR, MAX_PATH> path;
        std::array<PCWCHAR, 2> other_dirs { working_dir.c_str(), nullptr };

        // PathFindOnPath() is a little gross because it uses the same
        // buffer for input and output, so we need to copy our input
        // into the path array.
        std::wcsncpy(path.data(), raw_target.c_str(), path.size());
        if (path[path.size() - 1] != 0) {
          // The path was so long it was truncated by this copy. We'll
          // assume it was an absolute path (likely) and use it unmodified.
          match_replacement = raw_target;
        }
        // See if we can find the path on our search path or working directory
        else if (PathFindOnPathW(path.data(), other_dirs.data())) {
          match_replacement = std::wstring { path.data() };
        }
        else {
          // We couldn't find the target, so we'll just hope for the best
          match_replacement = raw_target;
        }
        break;
      }

      // Working directory
      case L'w':
        match_replacement = working_dir;
        break;

      default:
        BOOST_LOG(warning) << "Unsupported argument replacement: %%" << next_char;
        break;
    }

    // Replace the % and following character with the match replacement
    cmd_string.replace(match_pos, 2, match_replacement);

    // Skip beyond the match replacement itself to prevent recursive replacement
    match_pos += match_replacement.size();
  }

  BOOST_LOG(info) << "Resolved user-provided command '"sv << raw_cmd << "' to '"sv << cmd_string << '\'';
  return cmd_string;
}
/**
* @brief Run a command on the users profile.
*
* Launches a child process as the user, using the current user's environment and a specific working directory.
*
* @param elevated Specify whether to elevate the process.
* @param interactive Specify whether this will run in a window or hidden.
* @param cmd The command to run.
* @param working_dir The working directory for the new process.
* @param env The environment variables to use for the new process.
* @param file A file object to redirect the child process's output to (may be `nullptr`).
* @param ec An error code, set to indicate any errors that occur during the launch process.
* @param group A pointer to a `bp::group` object to which the new process should belong (may be `nullptr`).
* @return A `bp::child` object representing the new process, or an empty `bp::child` object if the launch fails.
*/
bp::child
run_command(bool elevated, bool interactive, const std::string &cmd, boost::filesystem::path &working_dir, const bp::environment &env, FILE *file, std::error_code &ec, bp::group *group) {
  // Convert the working directory to UTF-16 for the wide Win32 APIs below.
  std::wstring start_dir = from_utf8(working_dir.string());
  // If a process group was supplied, new processes are attached to its job object.
  HANDLE job = group ? group->native_handle() : nullptr;
  STARTUPINFOEXW startup_info = create_startup_info(file, job ? &job : nullptr, ec);
  PROCESS_INFORMATION process_info;
  // Clone the environment to create a local copy. Boost.Process (bp) shares the environment with all spawned processes.
  // Since we're going to modify the 'env' variable by merging user-specific environment variables into it,
  // we make a clone to prevent side effects to the shared environment.
  bp::environment cloned_env = env;
  if (ec) {
    // In the event that startup_info failed, return a blank child process.
    return bp::child();
  }
  // Use RAII to ensure the attribute list is freed when we're done with it
  auto attr_list_free = util::fail_guard([list = startup_info.lpAttributeList]() {
    free_proc_thread_attr_list(list);
  });
  DWORD creation_flags = EXTENDED_STARTUPINFO_PRESENT | CREATE_UNICODE_ENVIRONMENT | CREATE_BREAKAWAY_FROM_JOB;
  // Create a new console for interactive processes and use no console for non-interactive processes
  creation_flags |= interactive ? CREATE_NEW_CONSOLE : CREATE_NO_WINDOW;
  // Find the PATH variable in our environment block using a case-insensitive search
  auto sunshine_wenv = boost::this_process::wenvironment();
  std::wstring path_var_name { L"PATH" };
  std::wstring old_path_val;
  auto itr = std::find_if(sunshine_wenv.cbegin(), sunshine_wenv.cend(), [&](const auto &e) { return boost::iequals(e.get_name(), path_var_name); });
  if (itr != sunshine_wenv.cend()) {
    // Use the existing variable if it exists, since Boost treats these as case-sensitive.
    path_var_name = itr->get_name();
    old_path_val = sunshine_wenv[path_var_name].to_string();
  }
  // Temporarily prepend the specified working directory to PATH to ensure CreateProcess()
  // will (preferentially) find binaries that reside in the working directory.
  sunshine_wenv[path_var_name].assign(start_dir + L";" + old_path_val);
  // Restore the old PATH value for our process when we're done here
  auto restore_path = util::fail_guard([&]() {
    if (old_path_val.empty()) {
      sunshine_wenv[path_var_name].clear();
    }
    else {
      sunshine_wenv[path_var_name].assign(old_path_val);
    }
  });
  // NOTE(review): 'ret' is only assigned inside the branches below. If
  // impersonate_current_user() fails before its callback runs, 'ret' stays
  // uninitialized; presumably create_boost_child_from_results() checks 'ec'
  // before reading it -- confirm.
  BOOL ret;
  if (is_running_as_system()) {
    // Duplicate the current user's token
    HANDLE user_token = retrieve_users_token(elevated);
    if (!user_token) {
      // Fail the launch rather than risking launching with Sunshine's permissions unmodified.
      ec = std::make_error_code(std::errc::permission_denied);
      return bp::child();
    }
    // Use RAII to ensure the shell token is closed when we're done with it
    auto token_close = util::fail_guard([user_token]() {
      CloseHandle(user_token);
    });
    // Populate env with user-specific environment variables
    if (!merge_user_environment_block(cloned_env, user_token)) {
      ec = std::make_error_code(std::errc::not_enough_memory);
      return bp::child();
    }
    // Open the process as the current user account, elevation is handled in the token itself.
    ec = impersonate_current_user(user_token, [&]() {
      std::wstring env_block = create_environment_block(cloned_env);
      std::wstring wcmd = resolve_command_string(cmd, start_dir, user_token, creation_flags);
      ret = CreateProcessAsUserW(user_token,
        NULL,
        (LPWSTR) wcmd.c_str(),
        NULL,
        NULL,
        // Only inherit handles when we actually set up std handle redirection.
        !!(startup_info.StartupInfo.dwFlags & STARTF_USESTDHANDLES),
        creation_flags,
        env_block.data(),
        start_dir.empty() ? NULL : start_dir.c_str(),
        (LPSTARTUPINFOW) &startup_info,
        &process_info);
    });
  }
  // Otherwise, launch the process using CreateProcessW()
  // This will inherit the elevation of whatever the user launched Sunshine with.
  else {
    // Open our current token to resolve environment variables
    HANDLE process_token;
    if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY | TOKEN_DUPLICATE, &process_token)) {
      ec = std::make_error_code(std::errc::permission_denied);
      return bp::child();
    }
    auto token_close = util::fail_guard([process_token]() {
      CloseHandle(process_token);
    });
    // Populate env with user-specific environment variables
    if (!merge_user_environment_block(cloned_env, process_token)) {
      ec = std::make_error_code(std::errc::not_enough_memory);
      return bp::child();
    }
    std::wstring env_block = create_environment_block(cloned_env);
    std::wstring wcmd = resolve_command_string(cmd, start_dir, NULL, creation_flags);
    ret = CreateProcessW(NULL,
      (LPWSTR) wcmd.c_str(),
      NULL,
      NULL,
      !!(startup_info.StartupInfo.dwFlags & STARTF_USESTDHANDLES),
      creation_flags,
      env_block.data(),
      start_dir.empty() ? NULL : start_dir.c_str(),
      (LPSTARTUPINFOW) &startup_info,
      &process_info);
  }
  // Use the results of the launch to create a bp::child object
  return create_boost_child_from_results(ret, cmd, ec, process_info);
}
/**
* @brief Open a url in the default web browser.
* @param url The url to open.
*/
void
open_url(const std::string &url) {
boost::process::v1::environment _env = boost::this_process::environment();
auto working_dir = boost::filesystem::path();
std::error_code ec;
auto child = run_command(false, false, url, working_dir, _env, nullptr, ec, nullptr);
if (ec) {
BOOST_LOG(warning) << "Couldn't open url ["sv << url << "]: System: "sv << ec.message();
}
else {
BOOST_LOG(info) << "Opened url ["sv << url << "]"sv;
child.detach();
}
}
/**
 * @brief Adjust the priority of the calling thread.
 * @param priority The cross-platform priority level to apply.
 */
void
adjust_thread_priority(thread_priority_e priority) {
  // Translate our portable priority level into the Win32 thread priority constant.
  int native_priority;
  if (priority == thread_priority_e::low) {
    native_priority = THREAD_PRIORITY_BELOW_NORMAL;
  }
  else if (priority == thread_priority_e::normal) {
    native_priority = THREAD_PRIORITY_NORMAL;
  }
  else if (priority == thread_priority_e::high) {
    native_priority = THREAD_PRIORITY_ABOVE_NORMAL;
  }
  else if (priority == thread_priority_e::critical) {
    native_priority = THREAD_PRIORITY_HIGHEST;
  }
  else {
    BOOST_LOG(error) << "Unknown thread priority: "sv << (int) priority;
    return;
  }
  // Failure here is non-fatal; we just log and carry on at the old priority.
  if (!SetThreadPriority(GetCurrentThread(), native_priority)) {
    auto winerr = GetLastError();
    BOOST_LOG(warning) << "Unable to set thread priority to "sv << native_priority << ": "sv << winerr;
  }
}
/**
 * @brief Apply system-wide tweaks that reduce latency before a stream starts.
 *
 * Counterpart of streaming_will_stop(), which reverts each change made here.
 */
void
streaming_will_start() {
  static std::once_flag load_wlanapi_once_flag;
  // Resolve the WLAN API entry points exactly once per process; on failure the
  // fn_Wlan* globals stay null and the WLAN tweaks below are skipped.
  std::call_once(load_wlanapi_once_flag, []() {
    // wlanapi.dll is not installed by default on Windows Server, so we load it dynamically
    HMODULE wlanapi = LoadLibraryExA("wlanapi.dll", NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
    if (!wlanapi) {
      BOOST_LOG(debug) << "wlanapi.dll is not available on this OS"sv;
      return;
    }
    fn_WlanOpenHandle = (decltype(fn_WlanOpenHandle)) GetProcAddress(wlanapi, "WlanOpenHandle");
    fn_WlanCloseHandle = (decltype(fn_WlanCloseHandle)) GetProcAddress(wlanapi, "WlanCloseHandle");
    fn_WlanFreeMemory = (decltype(fn_WlanFreeMemory)) GetProcAddress(wlanapi, "WlanFreeMemory");
    fn_WlanEnumInterfaces = (decltype(fn_WlanEnumInterfaces)) GetProcAddress(wlanapi, "WlanEnumInterfaces");
    fn_WlanSetInterface = (decltype(fn_WlanSetInterface)) GetProcAddress(wlanapi, "WlanSetInterface");
    if (!fn_WlanOpenHandle || !fn_WlanCloseHandle || !fn_WlanFreeMemory || !fn_WlanEnumInterfaces || !fn_WlanSetInterface) {
      BOOST_LOG(error) << "wlanapi.dll is missing exports?"sv;
      // Clear all pointers so later code sees a consistent "unavailable" state.
      fn_WlanOpenHandle = nullptr;
      fn_WlanCloseHandle = nullptr;
      fn_WlanFreeMemory = nullptr;
      fn_WlanEnumInterfaces = nullptr;
      fn_WlanSetInterface = nullptr;
      FreeLibrary(wlanapi);
      return;
    }
  });
  // Enable MMCSS scheduling for DWM
  DwmEnableMMCSS(true);
  // Reduce timer period to 0.5ms
  if (nt_set_timer_resolution_max()) {
    used_nt_set_timer_resolution = true;
  }
  else {
    BOOST_LOG(error) << "NtSetTimerResolution() failed, falling back to timeBeginPeriod()";
    timeBeginPeriod(1);
    used_nt_set_timer_resolution = false;
  }
  // Promote ourselves to high priority class
  SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);
  // Modify NVIDIA control panel settings again, in case they have been changed externally since sunshine launch
  if (nvprefs_instance.load()) {
    if (!nvprefs_instance.owning_undo_file()) {
      nvprefs_instance.restore_from_and_delete_undo_file_if_exists();
    }
    nvprefs_instance.modify_application_profile();
    nvprefs_instance.modify_global_profile();
    nvprefs_instance.unload();
  }
  // Enable low latency mode on all connected WLAN NICs if wlanapi.dll is available
  if (fn_WlanOpenHandle) {
    DWORD negotiated_version;
    // wlan_handle stays open for the duration of the stream; closing it in
    // streaming_will_stop() reverts the media-streaming mode change.
    if (fn_WlanOpenHandle(WLAN_API_MAKE_VERSION(2, 0), nullptr, &negotiated_version, &wlan_handle) == ERROR_SUCCESS) {
      PWLAN_INTERFACE_INFO_LIST wlan_interface_list;
      if (fn_WlanEnumInterfaces(wlan_handle, nullptr, &wlan_interface_list) == ERROR_SUCCESS) {
        for (DWORD i = 0; i < wlan_interface_list->dwNumberOfItems; i++) {
          if (wlan_interface_list->InterfaceInfo[i].isState == wlan_interface_state_connected) {
            // Enable media streaming mode for 802.11 wireless interfaces to reduce latency and
            // unnecessary background scanning operations that cause packet loss and jitter.
            //
            // https://docs.microsoft.com/en-us/windows-hardware/drivers/network/oid-wdi-set-connection-quality
            // https://docs.microsoft.com/en-us/previous-versions/windows/hardware/wireless/native-802-11-media-streaming
            BOOL value = TRUE;
            auto error = fn_WlanSetInterface(wlan_handle, &wlan_interface_list->InterfaceInfo[i].InterfaceGuid,
              wlan_intf_opcode_media_streaming_mode, sizeof(value), &value, nullptr);
            if (error == ERROR_SUCCESS) {
              BOOST_LOG(info) << "WLAN interface "sv << i << " is now in low latency mode"sv;
            }
          }
        }
        fn_WlanFreeMemory(wlan_interface_list);
      }
      else {
        // Enumeration failed, so the handle is useless; close it right away.
        fn_WlanCloseHandle(wlan_handle, nullptr);
        wlan_handle = NULL;
      }
    }
  }
  // If there is no mouse connected, enable Mouse Keys to force the cursor to appear
  if (!GetSystemMetrics(SM_MOUSEPRESENT)) {
    BOOST_LOG(info) << "A mouse was not detected. Sunshine will enable Mouse Keys while streaming to force the mouse cursor to appear.";
    // Get the current state of Mouse Keys so we can restore it when streaming is over
    previous_mouse_keys_state.cbSize = sizeof(previous_mouse_keys_state);
    if (SystemParametersInfoW(SPI_GETMOUSEKEYS, 0, &previous_mouse_keys_state, 0)) {
      MOUSEKEYS new_mouse_keys_state = {};
      // Enable Mouse Keys
      new_mouse_keys_state.cbSize = sizeof(new_mouse_keys_state);
      new_mouse_keys_state.dwFlags = MKF_MOUSEKEYSON | MKF_AVAILABLE;
      new_mouse_keys_state.iMaxSpeed = 10;
      new_mouse_keys_state.iTimeToMaxSpeed = 1000;
      if (SystemParametersInfoW(SPI_SETMOUSEKEYS, 0, &new_mouse_keys_state, 0)) {
        // Remember to restore the previous settings when we stop streaming
        enabled_mouse_keys = true;
      }
      else {
        auto winerr = GetLastError();
        BOOST_LOG(warning) << "Unable to enable Mouse Keys: "sv << winerr;
      }
    }
    else {
      auto winerr = GetLastError();
      BOOST_LOG(warning) << "Unable to get current state of Mouse Keys: "sv << winerr;
    }
  }
}
/**
 * @brief Revert the system-wide tweaks applied by streaming_will_start().
 */
void
streaming_will_stop() {
  // Demote ourselves back to normal priority class
  SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS);
  // End our 0.5ms timer request
  if (used_nt_set_timer_resolution) {
    used_nt_set_timer_resolution = false;
    if (!nt_set_timer_resolution_min()) {
      BOOST_LOG(error) << "nt_set_timer_resolution_min() failed even though nt_set_timer_resolution_max() succeeded";
    }
  }
  else {
    // We fell back to timeBeginPeriod(1) at stream start, so undo that instead.
    timeEndPeriod(1);
  }
  // Disable MMCSS scheduling for DWM
  DwmEnableMMCSS(false);
  // Closing our WLAN client handle will undo our optimizations
  if (wlan_handle != nullptr) {
    fn_WlanCloseHandle(wlan_handle, nullptr);
    wlan_handle = nullptr;
  }
  // Restore Mouse Keys back to the previous settings if we turned it on
  if (enabled_mouse_keys) {
    enabled_mouse_keys = false;
    if (!SystemParametersInfoW(SPI_SETMOUSEKEYS, 0, &previous_mouse_keys_state, 0)) {
      auto winerr = GetLastError();
      BOOST_LOG(warning) << "Unable to restore original state of Mouse Keys: "sv << winerr;
    }
  }
}
/**
 * @brief atexit() handler that respawns Sunshine with the original command line.
 *
 * Registered by restart() so the new instance only starts once this process is
 * actually exiting, avoiding a race between the two instances.
 */
void
restart_on_exit() {
  STARTUPINFOEXW startup_info {};
  startup_info.StartupInfo.cb = sizeof(startup_info);
  WCHAR executable[MAX_PATH];
  if (GetModuleFileNameW(NULL, executable, ARRAYSIZE(executable)) == 0) {
    auto winerr = GetLastError();
    BOOST_LOG(fatal) << "Failed to get Sunshine path: "sv << winerr;
    return;
  }
  PROCESS_INFORMATION process_info;
  // Relaunch our own executable with the exact command line we were started with.
  if (!CreateProcessW(executable,
        GetCommandLineW(),
        nullptr,
        nullptr,
        false,
        CREATE_UNICODE_ENVIRONMENT | EXTENDED_STARTUPINFO_PRESENT,
        nullptr,
        nullptr,
        (LPSTARTUPINFOW) &startup_info,
        &process_info)) {
    auto winerr = GetLastError();
    BOOST_LOG(fatal) << "Unable to restart Sunshine: "sv << winerr;
    return;
  }
  // We don't track the new process; close both handles to avoid leaking them.
  CloseHandle(process_info.hProcess);
  CloseHandle(process_info.hThread);
}
/**
 * @brief Restart Sunshine, either by respawning ourselves or by letting the
 *        service wrapper do it after we exit.
 */
void
restart() {
  // If we're running standalone, we have to respawn ourselves via CreateProcess().
  // If we're running from the service, we should just exit and let it respawn us.
  // NOTE(review): GetConsoleWindow() is used here as the standalone-vs-service
  // heuristic (services have no console window) — confirm this holds for all
  // launch modes.
  if (GetConsoleWindow() != NULL) {
    // Avoid racing with the new process by waiting until we're exiting to start it.
    atexit(restart_on_exit);
  }
  // We use an async exit call here because we can't block the HTTP thread or we'll hang shutdown.
  lifetime::exit_sunshine(0, true);
}
/**
 * @brief Set an environment variable for this process.
 * @param name The variable name.
 * @param value The value to assign.
 * @return 0 on success, or the errno-style result of _putenv_s() on failure.
 */
int
set_env(const std::string &name, const std::string &value) {
  // NOTE(review): _putenv_s() treats an empty value as a request to delete the
  // variable, so set_env(name, "") behaves like unset_env(name) on Windows —
  // confirm no caller relies on empty-but-present variables.
  return _putenv_s(name.c_str(), value.c_str());
}
/**
 * @brief Remove an environment variable from this process.
 * @param name The variable name.
 * @return 0 on success, or the errno-style result of _putenv_s() on failure.
 */
int
unset_env(const std::string &name) {
  // Assigning an empty value deletes the variable in the CRT environment.
  return _putenv_s(name.c_str(), "");
}
/**
 * @brief Context shared with prgrp_enum_windows() during window enumeration.
 */
struct enum_wnd_context_t {
  // PIDs belonging to the job object whose windows should receive WM_CLOSE.
  std::set<DWORD> process_ids;
  // Set to true once at least one WM_CLOSE was successfully delivered.
  // Default-initialized so a plain 'enum_wnd_context_t ctx;' doesn't carry an
  // indeterminate bool (callers previously had to remember '= {}').
  bool requested_exit = false;
};
/**
 * @brief EnumWindows() callback that delivers WM_CLOSE to windows owned by
 *        the PIDs listed in the enum_wnd_context_t passed via lParam.
 * @param hwnd The window being visited.
 * @param lParam Pointer to the enum_wnd_context_t for this enumeration.
 * @return TRUE to continue enumeration in all cases.
 */
static BOOL CALLBACK
prgrp_enum_windows(HWND hwnd, LPARAM lParam) {
  auto enum_ctx = (enum_wnd_context_t *) lParam;
  // Find the owner PID of this window
  DWORD wnd_process_id;
  if (!GetWindowThreadProcessId(hwnd, &wnd_process_id)) {
    // Continue enumeration
    return TRUE;
  }
  // Check if this window is owned by a process we want to terminate
  if (enum_ctx->process_ids.find(wnd_process_id) != enum_ctx->process_ids.end()) {
    // Send an async WM_CLOSE message to this window
    if (SendNotifyMessageW(hwnd, WM_CLOSE, 0, 0)) {
      BOOST_LOG(debug) << "Sent WM_CLOSE to PID: "sv << wnd_process_id;
      enum_ctx->requested_exit = true;
    }
    else {
      auto error = GetLastError();
      BOOST_LOG(warning) << "Failed to send WM_CLOSE to PID ["sv << wnd_process_id << "]: " << error;
    }
  }
  // Continue enumeration
  return TRUE;
}
/**
 * @brief Ask every process in a job object to exit gracefully by sending
 *        WM_CLOSE to all of their top-level windows.
 * @param native_handle The job object handle for the process group.
 * @return true if all processes were already gone or at least one window
 *         accepted a WM_CLOSE request, false otherwise.
 */
bool
request_process_group_exit(std::uintptr_t native_handle) {
  auto job_handle = (HANDLE) native_handle;
  // Get list of all processes in our job object
  bool success;
  DWORD required_length = sizeof(JOBOBJECT_BASIC_PROCESS_ID_LIST);
  auto process_id_list = (PJOBOBJECT_BASIC_PROCESS_ID_LIST) calloc(1, required_length);
  // Fix: the initial allocation was previously passed to
  // QueryInformationJobObject() without a null check (only the in-loop
  // reallocation was checked). Bail out early on allocation failure.
  if (!process_id_list) {
    return false;
  }
  auto fg = util::fail_guard([&process_id_list]() {
    free(process_id_list);
  });
  // Grow the buffer until the full PID list fits; the process count can change
  // between calls, so this may loop more than once.
  while (!(success = QueryInformationJobObject(job_handle, JobObjectBasicProcessIdList,
             process_id_list, required_length, &required_length)) &&
         GetLastError() == ERROR_MORE_DATA) {
    free(process_id_list);
    process_id_list = (PJOBOBJECT_BASIC_PROCESS_ID_LIST) calloc(1, required_length);
    if (!process_id_list) {
      return false;
    }
  }
  if (!success) {
    auto err = GetLastError();
    BOOST_LOG(warning) << "Failed to enumerate processes in group: "sv << err;
    return false;
  }
  else if (process_id_list->NumberOfProcessIdsInList == 0) {
    // If all processes are already dead, treat it as a success
    return true;
  }
  enum_wnd_context_t enum_ctx = {};
  enum_ctx.requested_exit = false;
  for (DWORD i = 0; i < process_id_list->NumberOfProcessIdsInList; i++) {
    enum_ctx.process_ids.emplace(process_id_list->ProcessIdList[i]);
  }
  // Enumerate all windows belonging to processes in the list
  EnumWindows(prgrp_enum_windows, (LPARAM) &enum_ctx);
  // Return success if we told at least one window to close
  return enum_ctx.requested_exit;
}
bool
process_group_running(std::uintptr_t native_handle) {
JOBOBJECT_BASIC_ACCOUNTING_INFORMATION accounting_info;
if (!QueryInformationJobObject((HANDLE) native_handle, JobObjectBasicAccountingInformation, &accounting_info, sizeof(accounting_info), nullptr)) {
auto err = GetLastError();
BOOST_LOG(error) << "Failed to get job accounting info: "sv << err;
return false;
}
return accounting_info.ActiveProcesses != 0;
}
/**
 * @brief Convert a Boost.Asio IPv4 address and port into a Winsock sockaddr.
 * @param address The IPv4 address.
 * @param port The port number (host byte order).
 * @return A fully populated SOCKADDR_IN.
 */
SOCKADDR_IN
to_sockaddr(boost::asio::ip::address_v4 address, uint16_t port) {
  SOCKADDR_IN result = {};
  result.sin_family = AF_INET;
  result.sin_port = htons(port);
  // address_v4::to_bytes() yields the address in network byte order already.
  const auto octets = address.to_bytes();
  memcpy(&result.sin_addr, octets.data(), sizeof(result.sin_addr));
  return result;
}
/**
 * @brief Convert a Boost.Asio IPv6 address and port into a Winsock sockaddr.
 * @param address The IPv6 address (scope id is preserved).
 * @param port The port number (host byte order).
 * @return A fully populated SOCKADDR_IN6.
 */
SOCKADDR_IN6
to_sockaddr(boost::asio::ip::address_v6 address, uint16_t port) {
  SOCKADDR_IN6 result = {};
  result.sin6_family = AF_INET6;
  result.sin6_port = htons(port);
  // Carry over the scope id so link-local addresses keep their interface.
  result.sin6_scope_id = address.scope_id();
  const auto octets = address.to_bytes();
  memcpy(&result.sin6_addr, octets.data(), sizeof(result.sin6_addr));
  return result;
}
// Use UDP segmentation offload if it is supported by the OS. If the NIC is capable, this will use
// hardware acceleration to reduce CPU usage. Support for USO was introduced in Windows 10 20H1.
/**
 * @brief Send a batch of UDP datagrams in a single WSASendMsg() call,
 *        relying on UDP segmentation offload (USO) when batching.
 * @param send_info Description of the target, source, headers and payload blocks.
 * @return true if the send succeeded; false lets the caller fall back to
 *         unbatched sends (e.g. when USO is unsupported).
 */
bool
send_batch(batched_send_info_t &send_info) {
  WSAMSG msg;
  // Convert the target address into a SOCKADDR
  SOCKADDR_IN taddr_v4;
  SOCKADDR_IN6 taddr_v6;
  if (send_info.target_address.is_v6()) {
    taddr_v6 = to_sockaddr(send_info.target_address.to_v6(), send_info.target_port);
    msg.name = (PSOCKADDR) &taddr_v6;
    msg.namelen = sizeof(taddr_v6);
  }
  else {
    taddr_v4 = to_sockaddr(send_info.target_address.to_v4(), send_info.target_port);
    msg.name = (PSOCKADDR) &taddr_v4;
    msg.namelen = sizeof(taddr_v4);
  }
  auto const max_bufs_per_msg = send_info.payload_buffers.size() + (send_info.headers ? 1 : 0);
  // NOTE(review): a runtime-sized array is a GCC/Clang extension, not standard
  // C++; fine for the MinGW build this targets — confirm if MSVC is ever needed.
  WSABUF bufs[(send_info.headers ? send_info.block_count : 1) * max_bufs_per_msg];
  DWORD bufcount = 0;
  if (send_info.headers) {
    // Interleave buffers for headers and payloads
    for (auto i = 0; i < send_info.block_count; i++) {
      bufs[bufcount].buf = (char *) &send_info.headers[(send_info.block_offset + i) * send_info.header_size];
      bufs[bufcount].len = send_info.header_size;
      bufcount++;
      auto payload_desc = send_info.buffer_for_payload_offset((send_info.block_offset + i) * send_info.payload_size);
      bufs[bufcount].buf = (char *) payload_desc.buffer;
      bufs[bufcount].len = send_info.payload_size;
      bufcount++;
    }
  }
  else {
    // Translate buffer descriptors into WSABUFs
    auto payload_offset = send_info.block_offset * send_info.payload_size;
    auto payload_length = payload_offset + (send_info.block_count * send_info.payload_size);
    while (payload_offset < payload_length) {
      auto payload_desc = send_info.buffer_for_payload_offset(payload_offset);
      bufs[bufcount].buf = (char *) payload_desc.buffer;
      // Clamp to the descriptor's remaining bytes and to the batch's end.
      bufs[bufcount].len = std::min(payload_desc.size, payload_length - payload_offset);
      payload_offset += bufs[bufcount].len;
      bufcount++;
    }
  }
  msg.lpBuffers = bufs;
  msg.dwBufferCount = bufcount;
  msg.dwFlags = 0;
  // At most, one DWORD option and one PKTINFO option
  char cmbuf[WSA_CMSG_SPACE(sizeof(DWORD)) +
             std::max(WSA_CMSG_SPACE(sizeof(IN6_PKTINFO)), WSA_CMSG_SPACE(sizeof(IN_PKTINFO)))] = {};
  ULONG cmbuflen = 0;
  msg.Control.buf = cmbuf;
  msg.Control.len = sizeof(cmbuf);
  // Pin the source address with a PKTINFO control message so replies come
  // from the interface the client expects.
  auto cm = WSA_CMSG_FIRSTHDR(&msg);
  if (send_info.source_address.is_v6()) {
    IN6_PKTINFO pktInfo;
    SOCKADDR_IN6 saddr_v6 = to_sockaddr(send_info.source_address.to_v6(), 0);
    pktInfo.ipi6_addr = saddr_v6.sin6_addr;
    pktInfo.ipi6_ifindex = 0;
    cmbuflen += WSA_CMSG_SPACE(sizeof(pktInfo));
    cm->cmsg_level = IPPROTO_IPV6;
    cm->cmsg_type = IPV6_PKTINFO;
    cm->cmsg_len = WSA_CMSG_LEN(sizeof(pktInfo));
    memcpy(WSA_CMSG_DATA(cm), &pktInfo, sizeof(pktInfo));
  }
  else {
    IN_PKTINFO pktInfo;
    SOCKADDR_IN saddr_v4 = to_sockaddr(send_info.source_address.to_v4(), 0);
    pktInfo.ipi_addr = saddr_v4.sin_addr;
    pktInfo.ipi_ifindex = 0;
    cmbuflen += WSA_CMSG_SPACE(sizeof(pktInfo));
    cm->cmsg_level = IPPROTO_IP;
    cm->cmsg_type = IP_PKTINFO;
    cm->cmsg_len = WSA_CMSG_LEN(sizeof(pktInfo));
    memcpy(WSA_CMSG_DATA(cm), &pktInfo, sizeof(pktInfo));
  }
  if (send_info.block_count > 1) {
    // UDP_SEND_MSG_SIZE tells the stack how to segment the single buffer
    // chain into individual datagrams (USO).
    cmbuflen += WSA_CMSG_SPACE(sizeof(DWORD));
    cm = WSA_CMSG_NXTHDR(&msg, cm);
    cm->cmsg_level = IPPROTO_UDP;
    cm->cmsg_type = UDP_SEND_MSG_SIZE;
    cm->cmsg_len = WSA_CMSG_LEN(sizeof(DWORD));
    *((DWORD *) WSA_CMSG_DATA(cm)) = send_info.header_size + send_info.payload_size;
  }
  msg.Control.len = cmbuflen;
  // If USO is not supported, this will fail and the caller will fall back to unbatched sends.
  DWORD bytes_sent;
  return WSASendMsg((SOCKET) send_info.native_socket, &msg, 0, &bytes_sent, nullptr, nullptr) != SOCKET_ERROR;
}
/**
 * @brief Send a single UDP datagram (optional header + payload) via WSASendMsg().
 * @param send_info Description of the target, source, header and payload.
 * @return true if the datagram was handed to the stack, false on error.
 */
bool
send(send_info_t &send_info) {
  WSAMSG msg;
  // Convert the target address into a SOCKADDR
  SOCKADDR_IN taddr_v4;
  SOCKADDR_IN6 taddr_v6;
  if (send_info.target_address.is_v6()) {
    taddr_v6 = to_sockaddr(send_info.target_address.to_v6(), send_info.target_port);
    msg.name = (PSOCKADDR) &taddr_v6;
    msg.namelen = sizeof(taddr_v6);
  }
  else {
    taddr_v4 = to_sockaddr(send_info.target_address.to_v4(), send_info.target_port);
    msg.name = (PSOCKADDR) &taddr_v4;
    msg.namelen = sizeof(taddr_v4);
  }
  // Scatter-gather: optional header first, then the payload.
  WSABUF bufs[2];
  DWORD bufcount = 0;
  if (send_info.header) {
    bufs[bufcount].buf = (char *) send_info.header;
    bufs[bufcount].len = send_info.header_size;
    bufcount++;
  }
  bufs[bufcount].buf = (char *) send_info.payload;
  bufs[bufcount].len = send_info.payload_size;
  bufcount++;
  msg.lpBuffers = bufs;
  msg.dwBufferCount = bufcount;
  msg.dwFlags = 0;
  // Control buffer holds one PKTINFO option pinning the source address.
  char cmbuf[std::max(WSA_CMSG_SPACE(sizeof(IN6_PKTINFO)), WSA_CMSG_SPACE(sizeof(IN_PKTINFO)))] = {};
  ULONG cmbuflen = 0;
  msg.Control.buf = cmbuf;
  msg.Control.len = sizeof(cmbuf);
  auto cm = WSA_CMSG_FIRSTHDR(&msg);
  if (send_info.source_address.is_v6()) {
    IN6_PKTINFO pktInfo;
    SOCKADDR_IN6 saddr_v6 = to_sockaddr(send_info.source_address.to_v6(), 0);
    pktInfo.ipi6_addr = saddr_v6.sin6_addr;
    pktInfo.ipi6_ifindex = 0;
    cmbuflen += WSA_CMSG_SPACE(sizeof(pktInfo));
    cm->cmsg_level = IPPROTO_IPV6;
    cm->cmsg_type = IPV6_PKTINFO;
    cm->cmsg_len = WSA_CMSG_LEN(sizeof(pktInfo));
    memcpy(WSA_CMSG_DATA(cm), &pktInfo, sizeof(pktInfo));
  }
  else {
    IN_PKTINFO pktInfo;
    SOCKADDR_IN saddr_v4 = to_sockaddr(send_info.source_address.to_v4(), 0);
    pktInfo.ipi_addr = saddr_v4.sin_addr;
    pktInfo.ipi_ifindex = 0;
    cmbuflen += WSA_CMSG_SPACE(sizeof(pktInfo));
    cm->cmsg_level = IPPROTO_IP;
    cm->cmsg_type = IP_PKTINFO;
    cm->cmsg_len = WSA_CMSG_LEN(sizeof(pktInfo));
    memcpy(WSA_CMSG_DATA(cm), &pktInfo, sizeof(pktInfo));
  }
  msg.Control.len = cmbuflen;
  DWORD bytes_sent;
  if (WSASendMsg((SOCKET) send_info.native_socket, &msg, 0, &bytes_sent, nullptr, nullptr) == SOCKET_ERROR) {
    auto winerr = WSAGetLastError();
    BOOST_LOG(warning) << "WSASendMsg() failed: "sv << winerr;
    return false;
  }
  return true;
}
/**
 * @brief RAII guard that removes a socket from a qWAVE QoS flow on destruction.
 *
 * Returned (wrapped in unique_ptr) by enable_socket_qos().
 */
class qos_t: public deinit_t {
public:
  /**
   * @brief Take ownership of a flow previously created with QOSAddSocketToFlow().
   * @param flow_id The qWAVE flow to tear down when this guard is destroyed.
   */
  qos_t(QOS_FLOWID flow_id):
      flow_id(flow_id) {}

  // Copying would tear down the same flow twice (Rule of Five); this type is
  // only ever held through std::unique_ptr<deinit_t>, so forbid copies.
  qos_t(const qos_t &) = delete;
  qos_t &operator=(const qos_t &) = delete;

  virtual ~qos_t() {
    // Passing a null SOCKET removes every socket attached to the flow.
    if (!fn_QOSRemoveSocketFromFlow(qos_handle, (SOCKET) NULL, flow_id, 0)) {
      auto winerr = GetLastError();
      BOOST_LOG(warning) << "QOSRemoveSocketFromFlow() failed: "sv << winerr;
    }
  }

private:
  QOS_FLOWID flow_id;
};
/**
 * @brief Enables QoS on the given socket for traffic to the specified destination.
 * @param native_socket The native socket handle.
 * @param address The destination address for traffic sent on this socket.
 * @param port The destination port for traffic sent on this socket.
 * @param data_type The type of traffic sent on this socket.
 * @param dscp_tagging Specifies whether to enable DSCP tagging on outgoing traffic.
 * @return A deinit_t guard that removes the flow on destruction, or nullptr
 *         if QoS could not be (or should not be) enabled.
 */
std::unique_ptr<deinit_t>
enable_socket_qos(uintptr_t native_socket, boost::asio::ip::address &address, uint16_t port, qos_data_type_e data_type, bool dscp_tagging) {
  SOCKADDR_IN saddr_v4;
  SOCKADDR_IN6 saddr_v6;
  PSOCKADDR dest_addr;
  bool using_connect_hack = false;
  // Windows doesn't support any concept of traffic priority without DSCP tagging
  if (!dscp_tagging) {
    return nullptr;
  }
  // Resolve the qWAVE entry points once per process; failure leaves the
  // fn_QOS* globals null and disables QoS for the rest of the run.
  static std::once_flag load_qwave_once_flag;
  std::call_once(load_qwave_once_flag, []() {
    // qWAVE is not installed by default on Windows Server, so we load it dynamically
    HMODULE qwave = LoadLibraryExA("qwave.dll", NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
    if (!qwave) {
      BOOST_LOG(debug) << "qwave.dll is not available on this OS"sv;
      return;
    }
    fn_QOSCreateHandle = (decltype(fn_QOSCreateHandle)) GetProcAddress(qwave, "QOSCreateHandle");
    fn_QOSAddSocketToFlow = (decltype(fn_QOSAddSocketToFlow)) GetProcAddress(qwave, "QOSAddSocketToFlow");
    fn_QOSRemoveSocketFromFlow = (decltype(fn_QOSRemoveSocketFromFlow)) GetProcAddress(qwave, "QOSRemoveSocketFromFlow");
    if (!fn_QOSCreateHandle || !fn_QOSAddSocketToFlow || !fn_QOSRemoveSocketFromFlow) {
      BOOST_LOG(error) << "qwave.dll is missing exports?"sv;
      fn_QOSCreateHandle = nullptr;
      fn_QOSAddSocketToFlow = nullptr;
      fn_QOSRemoveSocketFromFlow = nullptr;
      FreeLibrary(qwave);
      return;
    }
    QOS_VERSION qos_version { 1, 0 };
    if (!fn_QOSCreateHandle(&qos_version, &qos_handle)) {
      auto winerr = GetLastError();
      BOOST_LOG(warning) << "QOSCreateHandle() failed: "sv << winerr;
      return;
    }
  });
  // If qWAVE is unavailable, just return
  if (!fn_QOSAddSocketToFlow || !qos_handle) {
    return nullptr;
  }
  // If the connect() hack below was used, disconnect the socket again on exit
  // (an AF_INET6 sockaddr with no address acts as a "disconnect" request).
  auto disconnect_fg = util::fail_guard([&]() {
    if (using_connect_hack) {
      SOCKADDR_IN6 empty = {};
      empty.sin6_family = AF_INET6;
      if (connect((SOCKET) native_socket, (PSOCKADDR) &empty, sizeof(empty)) < 0) {
        auto wsaerr = WSAGetLastError();
        BOOST_LOG(error) << "qWAVE dual-stack workaround failed: "sv << wsaerr;
      }
    }
  });
  if (address.is_v6()) {
    auto address_v6 = address.to_v6();
    saddr_v6 = to_sockaddr(address_v6, port);
    dest_addr = (PSOCKADDR) &saddr_v6;
    // qWAVE doesn't properly support IPv4-mapped IPv6 addresses, nor does it
    // correctly support IPv4 addresses on a dual-stack socket (despite MSDN's
    // claims to the contrary). To get proper QoS tagging when hosting in dual
    // stack mode, we will temporarily connect() the socket to allow qWAVE to
    // successfully initialize a flow, then disconnect it again so WSASendMsg()
    // works later on.
    if (address_v6.is_v4_mapped()) {
      if (connect((SOCKET) native_socket, (PSOCKADDR) &saddr_v6, sizeof(saddr_v6)) < 0) {
        auto wsaerr = WSAGetLastError();
        BOOST_LOG(error) << "qWAVE dual-stack workaround failed: "sv << wsaerr;
      }
      else {
        BOOST_LOG(debug) << "Using qWAVE connect() workaround for QoS tagging"sv;
        using_connect_hack = true;
        // A connected socket must pass a null destination to QOSAddSocketToFlow().
        dest_addr = nullptr;
      }
    }
  }
  else {
    saddr_v4 = to_sockaddr(address.to_v4(), port);
    dest_addr = (PSOCKADDR) &saddr_v4;
  }
  // Map our traffic category onto the qWAVE traffic types.
  QOS_TRAFFIC_TYPE traffic_type;
  switch (data_type) {
    case qos_data_type_e::audio:
      traffic_type = QOSTrafficTypeVoice;
      break;
    case qos_data_type_e::video:
      traffic_type = QOSTrafficTypeAudioVideo;
      break;
    default:
      BOOST_LOG(error) << "Unknown traffic type: "sv << (int) data_type;
      return nullptr;
  }
  QOS_FLOWID flow_id = 0;
  if (!fn_QOSAddSocketToFlow(qos_handle, (SOCKET) native_socket, dest_addr, traffic_type, QOS_NON_ADAPTIVE_FLOW, &flow_id)) {
    auto winerr = GetLastError();
    BOOST_LOG(warning) << "QOSAddSocketToFlow() failed: "sv << winerr;
    return nullptr;
  }
  // The returned guard removes the flow (and thus the tagging) on destruction.
  return std::make_unique<qos_t>(flow_id);
}
int64_t
qpc_counter() {
LARGE_INTEGER performance_counter;
if (QueryPerformanceCounter(&performance_counter)) return performance_counter.QuadPart;
return 0;
}
/**
 * @brief Convert a difference of two QPC samples into nanoseconds.
 * @param performance_counter1 The later counter sample (ticks).
 * @param performance_counter2 The earlier counter sample (ticks).
 * @return The elapsed time, or a zero duration if the counter frequency is unavailable.
 */
std::chrono::nanoseconds
qpc_time_difference(int64_t performance_counter1, int64_t performance_counter2) {
  auto get_frequency = []() {
    LARGE_INTEGER frequency;
    frequency.QuadPart = 0;
    QueryPerformanceFrequency(&frequency);
    return frequency.QuadPart;
  };
  // The counter frequency (ticks per second) is fixed at boot, so sample it once.
  static const double frequency = get_frequency();
  if (frequency) {
    // ticks / (ticks-per-second) = seconds; scale by nano::den to get ns.
    // Fix: the previous expression multiplied by the frequency and divided by
    // nano::den, which is the inverse of the correct conversion.
    return std::chrono::nanoseconds((int64_t) ((performance_counter1 - performance_counter2) * std::nano::den / frequency));
  }
  return {};
}
/**
 * @brief Convert a UTF-8 string to UTF-16.
 * @param string The UTF-8 input; invalid sequences cause an empty result.
 * @return The UTF-16 equivalent, or an empty string on failure.
 */
std::wstring
from_utf8(const std::string &string) {
  // Empty input needs no API calls.
  if (string.empty()) {
    return {};
  }
  // First pass: measure the required UTF-16 length.
  auto required = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, string.data(), string.size(), nullptr, 0);
  if (required == 0) {
    auto winerr = GetLastError();
    BOOST_LOG(error) << "Failed to get UTF-16 buffer size: "sv << winerr;
    return {};
  }
  // Second pass: convert into the pre-sized buffer.
  std::wstring converted(required, L'\0');
  if (MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, string.data(), string.size(), converted.data(), converted.size()) == 0) {
    auto winerr = GetLastError();
    BOOST_LOG(error) << "Failed to convert string to UTF-16: "sv << winerr;
    return {};
  }
  return converted;
}
/**
 * @brief Convert a UTF-16 string to UTF-8.
 * @param string The UTF-16 input; invalid sequences cause an empty result.
 * @return The UTF-8 equivalent, or an empty string on failure.
 */
std::string
to_utf8(const std::wstring &string) {
  // Empty input needs no API calls.
  if (string.empty()) {
    return {};
  }
  // First pass: measure the required UTF-8 length.
  auto required = WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, string.data(), string.size(),
    nullptr, 0, nullptr, nullptr);
  if (required == 0) {
    auto winerr = GetLastError();
    BOOST_LOG(error) << "Failed to get UTF-8 buffer size: "sv << winerr;
    return {};
  }
  // Second pass: convert into the pre-sized buffer.
  std::string converted(required, '\0');
  if (WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, string.data(), string.size(),
        converted.data(), converted.size(), nullptr, nullptr) == 0) {
    auto winerr = GetLastError();
    BOOST_LOG(error) << "Failed to convert string to UTF-8: "sv << winerr;
    return {};
  }
  return converted;
}
std::string
get_host_name() {
WCHAR hostname[256];
if (GetHostNameW(hostname, ARRAYSIZE(hostname)) == SOCKET_ERROR) {
BOOST_LOG(error) << "GetHostNameW() failed: "sv << WSAGetLastError();
return "Sunshine"s;
}
return to_utf8(hostname);
}
/**
 * @brief high_precision_timer backed by a Win32 waitable timer, using the
 *        high-resolution flavor when the OS supports it.
 */
class win32_high_precision_timer: public high_precision_timer {
public:
  win32_high_precision_timer() {
    // Use CREATE_WAITABLE_TIMER_HIGH_RESOLUTION if supported (Windows 10 1809+)
    timer = CreateWaitableTimerEx(nullptr, nullptr, CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, TIMER_ALL_ACCESS);
    if (!timer) {
      // Fall back to a regular waitable timer on older Windows releases.
      timer = CreateWaitableTimerEx(nullptr, nullptr, 0, TIMER_ALL_ACCESS);
      if (!timer) {
        BOOST_LOG(error) << "Unable to create high_precision_timer, CreateWaitableTimerEx() failed: " << GetLastError();
      }
    }
  }

  ~win32_high_precision_timer() {
    if (timer) CloseHandle(timer);
  }

  /**
   * @brief Block the calling thread for the given duration.
   * @param duration Sleep time; must be within [0s, 5s] and the timer must be valid.
   */
  void
  sleep_for(const std::chrono::nanoseconds &duration) override {
    if (!timer) {
      BOOST_LOG(error) << "Attempting high_precision_timer::sleep_for() with uninitialized timer";
      return;
    }
    if (duration < 0s) {
      BOOST_LOG(error) << "Attempting high_precision_timer::sleep_for() with negative duration";
      return;
    }
    if (duration > 5s) {
      BOOST_LOG(error) << "Attempting high_precision_timer::sleep_for() with unexpectedly large duration (>5s)";
      return;
    }
    LARGE_INTEGER due_time;
    // A negative due time means "relative", in 100ns units (hence /100, negated).
    due_time.QuadPart = duration.count() / -100;
    // NOTE(review): SetWaitableTimer()'s return value is unchecked; a failure
    // here would make WaitForSingleObject() wait on a stale expiration — confirm
    // whether that is acceptable.
    SetWaitableTimer(timer, &due_time, 0, nullptr, nullptr, false);
    WaitForSingleObject(timer, INFINITE);
  }

  // True when the underlying waitable timer was created successfully.
  operator bool() override {
    return timer != NULL;
  }

private:
  HANDLE timer = NULL;
};
/**
 * @brief Create the platform's high-precision timer implementation.
 * @return A win32_high_precision_timer; test its validity via operator bool.
 */
std::unique_ptr<high_precision_timer>
create_high_precision_timer() {
  return std::make_unique<win32_high_precision_timer>();
}
} // namespace platf
| 70,386
|
C++
|
.cpp
| 1,634
| 36.729498
| 189
| 0.659798
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,019
|
display_wgc.cpp
|
LizardByte_Sunshine/src/platform/windows/display_wgc.cpp
|
/**
* @file src/platform/windows/display_wgc.cpp
* @brief Definitions for WinRT Windows.Graphics.Capture API
*/
#include <dxgi1_2.h>
#include "display.h"
#include "misc.h"
#include "src/logging.h"
// Gross hack to work around MINGW-packages#22160
#define ____FIReference_1_boolean_INTERFACE_DEFINED__
#include <windows.graphics.capture.interop.h>
#include <winrt/windows.foundation.h>
#include <winrt/windows.foundation.metadata.h>
#include <winrt/windows.graphics.directx.direct3d11.h>
namespace platf {
using namespace std::literals;
}
namespace winrt {
  using namespace Windows::Foundation;
  using namespace Windows::Foundation::Metadata;
  using namespace Windows::Graphics::Capture;
  using namespace Windows::Graphics::DirectX::Direct3D11;

  // Declared manually because this interop helper (from windows.graphics.directx.direct3d11.interop.h)
  // wraps a DXGI device in a WinRT IDirect3DDevice-compatible IInspectable.
  extern "C" {
  HRESULT __stdcall CreateDirect3D11DeviceFromDXGIDevice(::IDXGIDevice *dxgiDevice, ::IInspectable **graphicsDevice);
  }

  /**
   * Windows structures sometimes have compile-time GUIDs. GCC supports this, but in a roundabout way.
   * If WINRT_IMPL_HAS_DECLSPEC_UUID is true, then the compiler supports adding this attribute to a struct. For example, Visual Studio.
   * If not, then MinGW GCC has a workaround to assign a GUID to a structure.
   */
  struct
#if WINRT_IMPL_HAS_DECLSPEC_UUID
    __declspec(uuid("A9B3D012-3DF2-4EE3-B8D1-8695F457D3C1"))
#endif
    IDirect3DDxgiInterfaceAccess: ::IUnknown {
    // Retrieves the underlying DXGI interface from a WinRT Direct3D object.
    virtual HRESULT __stdcall GetInterface(REFIID id, void **object) = 0;
  };
}  // namespace winrt
#if !WINRT_IMPL_HAS_DECLSPEC_UUID
// MinGW GCC cannot attach a GUID via __declspec(uuid(...)), so provide the GUID
// through a specialization of __mingw_uuidof instead.
static constexpr GUID GUID__IDirect3DDxgiInterfaceAccess = {
  0xA9B3D012, 0x3DF2, 0x4EE3, { 0xB8, 0xD1, 0x86, 0x95, 0xF4, 0x57, 0xD3, 0xC1 }
  // compare with __declspec(uuid(...)) for the struct above.
};

template <>
constexpr auto
__mingw_uuidof<winrt::IDirect3DDxgiInterfaceAccess>() -> GUID const & {
  return GUID__IDirect3DDxgiInterfaceAccess;
}
#endif
namespace platf::dxgi {
// The condition variable signals frame arrival from the frame pool's callback
// thread to the capture thread.
wgc_capture_t::wgc_capture_t() {
  InitializeConditionVariable(&frame_present_cv);
}
// Stop the capture session and release the WinRT objects; Close() ends the
// capture, and the null assignments drop the remaining WinRT references.
wgc_capture_t::~wgc_capture_t() {
  if (capture_session)
    capture_session.Close();
  if (frame_pool)
    frame_pool.Close();
  item = nullptr;
  capture_session = nullptr;
  frame_pool = nullptr;
}
/**
 * @brief Initialize the Windows.Graphics.Capture backend.
 * @return 0 on success, -1 on failure.
 */
int
wgc_capture_t::init(display_base_t *display, const ::video::config_t &config) {
  HRESULT status;
  dxgi::dxgi_t dxgi;
  winrt::com_ptr<::IInspectable> d3d_comhandle;
  try {
    if (!winrt::GraphicsCaptureSession::IsSupported()) {
      BOOST_LOG(error) << "Screen capture is not supported on this device for this release of Windows!"sv;
      return -1;
    }
    // Wrap the capture device's DXGI interface in a WinRT-compatible object.
    if (FAILED(status = display->device->QueryInterface(IID_IDXGIDevice, (void **) &dxgi))) {
      BOOST_LOG(error) << "Failed to query DXGI interface from device [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    if (FAILED(status = winrt::CreateDirect3D11DeviceFromDXGIDevice(*&dxgi, d3d_comhandle.put()))) {
      BOOST_LOG(error) << "Failed to query WinRT DirectX interface from device [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
  }
  catch (winrt::hresult_error &e) {
    BOOST_LOG(error) << "Screen capture is not supported on this device for this release of Windows: failed to acquire device: [0x"sv << util::hex(e.code()).to_string_view() << ']';
    return -1;
  }
  DXGI_OUTPUT_DESC output_desc;
  uwp_device = d3d_comhandle.as<winrt::IDirect3DDevice>();
  display->output->GetDesc(&output_desc);
  // Create a capture item for the monitor backing this display output.
  auto monitor_factory = winrt::get_activation_factory<winrt::GraphicsCaptureItem, IGraphicsCaptureItemInterop>();
  // NOTE(review): if monitor_factory is nullptr, the short-circuit skips the
  // assignment and 'status' is logged uninitialized below — confirm whether
  // that path can occur in practice.
  if (monitor_factory == nullptr ||
      FAILED(status = monitor_factory->CreateForMonitor(output_desc.Monitor, winrt::guid_of<winrt::IGraphicsCaptureItem>(), winrt::put_abi(item)))) {
    BOOST_LOG(error) << "Screen capture is not supported on this device for this release of Windows: failed to acquire display: [0x"sv << util::hex(status).to_string_view() << ']';
    return -1;
  }
  // HDR streams capture as FP16; SDR uses 8-bit BGRA.
  if (config.dynamicRange)
    display->capture_format = DXGI_FORMAT_R16G16B16A16_FLOAT;
  else
    display->capture_format = DXGI_FORMAT_B8G8R8A8_UNORM;
  try {
    // Two buffered frames; FrameArrived fires on the frame pool's own thread.
    frame_pool = winrt::Direct3D11CaptureFramePool::CreateFreeThreaded(uwp_device, static_cast<winrt::Windows::Graphics::DirectX::DirectXPixelFormat>(display->capture_format), 2, item.Size());
    capture_session = frame_pool.CreateCaptureSession(item);
    frame_pool.FrameArrived({ this, &wgc_capture_t::on_frame_arrived });
  }
  catch (winrt::hresult_error &e) {
    BOOST_LOG(error) << "Screen capture is not supported on this device for this release of Windows: failed to create capture session: [0x"sv << util::hex(e.code()).to_string_view() << ']';
    return -1;
  }
  try {
    // The yellow capture border is only suppressible on newer Windows builds.
    if (winrt::ApiInformation::IsPropertyPresent(L"Windows.Graphics.Capture.GraphicsCaptureSession", L"IsBorderRequired")) {
      capture_session.IsBorderRequired(false);
    }
    else {
      BOOST_LOG(warning) << "Can't disable colored border around capture area on this version of Windows";
    }
  }
  catch (winrt::hresult_error &e) {
    BOOST_LOG(warning) << "Screen capture may not be fully supported on this device for this release of Windows: failed to disable border around capture area: [0x"sv << util::hex(e.code()).to_string_view() << ']';
  }
  try {
    capture_session.StartCapture();
  }
  catch (winrt::hresult_error &e) {
    BOOST_LOG(error) << "Screen capture is not supported on this device for this release of Windows: failed to start capture: [0x"sv << util::hex(e.code()).to_string_view() << ']';
    return -1;
  }
  return 0;
}
/**
 * Frame-pool callback; executes on a worker thread owned by the free-threaded
 * frame pool (the PRODUCER side). Publishes the newest frame for the capture
 * (consumer) thread under the frame lock, then wakes that thread.
 */
void
wgc_capture_t::on_frame_arrived(winrt::Direct3D11CaptureFramePool const &sender, winrt::IInspectable const &) {
  winrt::Windows::Graphics::Capture::Direct3D11CaptureFrame incoming { nullptr };
  try {
    incoming = sender.TryGetNextFrame();
  }
  catch (winrt::hresult_error &e) {
    BOOST_LOG(warning) << "Failed to capture frame: "sv << e.code();
    return;
  }
  // Nothing to publish on this callback.
  if (incoming == nullptr) {
    return;
  }
  AcquireSRWLockExclusive(&frame_lock);
  // Discard an unconsumed frame; the consumer only ever wants the latest one.
  if (produced_frame) {
    produced_frame.Close();
  }
  produced_frame = incoming;
  ReleaseSRWLockExclusive(&frame_lock);
  WakeConditionVariable(&frame_present_cv);
}
/**
 * @brief Get the next frame from the producer thread.
 * If not available, the capture thread blocks until one is, or the wait times out.
 * @param timeout how long to wait for the next frame
 * @param out a texture containing the frame just captured
 * @param out_time the timestamp of the frame just captured
 * @return capture_e::ok on success, capture_e::timeout if no frame arrived in
 *         time (or a spurious wakeup occurred), capture_e::error otherwise.
 */
capture_e
wgc_capture_t::next_frame(std::chrono::milliseconds timeout, ID3D11Texture2D **out, uint64_t &out_time) {
  // this CONSUMER runs in the capture thread
  release_frame();
  AcquireSRWLockExclusive(&frame_lock);
  if (produced_frame == nullptr && SleepConditionVariableSRW(&frame_present_cv, &frame_lock, timeout.count(), 0) == 0) {
    ReleaseSRWLockExclusive(&frame_lock);
    return (GetLastError() == ERROR_TIMEOUT) ? capture_e::timeout : capture_e::error;
  }
  // Take ownership of the produced frame, if any.
  if (produced_frame) {
    consumed_frame = produced_frame;
    produced_frame = nullptr;
  }
  ReleaseSRWLockExclusive(&frame_lock);
  if (consumed_frame == nullptr)  // spurious wakeup
    return capture_e::timeout;
  auto capture_access = consumed_frame.Surface().as<winrt::IDirect3DDxgiInterfaceAccess>();
  if (capture_access == nullptr)
    return capture_e::error;
  // Fix: previously the HRESULT of GetInterface was ignored, so a failure
  // could return capture_e::ok with *out left unset (dangling for the caller).
  HRESULT status = capture_access->GetInterface(IID_ID3D11Texture2D, (void **) out);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Failed to get D3D11 texture from capture frame [0x"sv << util::hex(status).to_string_view() << ']';
    return capture_e::error;
  }
  out_time = consumed_frame.SystemRelativeTime().count();  // raw ticks from query performance counter
  return capture_e::ok;
}
// Release the frame currently held by the consumer side, if any.
capture_e
wgc_capture_t::release_frame() {
  if (consumed_frame == nullptr) {
    return capture_e::ok;  // nothing held
  }
  consumed_frame.Close();
  consumed_frame = nullptr;
  return capture_e::ok;
}
// Toggle cursor capture on the session. Returns 0 on success, -1 on failure.
int
wgc_capture_t::set_cursor_visible(bool x) {
  try {
    // Only touch the session when the requested state actually differs.
    const bool currently_enabled = capture_session.IsCursorCaptureEnabled();
    if (currently_enabled != x) {
      capture_session.IsCursorCaptureEnabled(x);
    }
    return 0;
  }
  catch (winrt::hresult_error &) {
    return -1;
  }
}
// Initialize the base display, then the WGC duplicator on top of it.
// Returns 0 on success, -1 if either step fails.
int
display_wgc_ram_t::init(const ::video::config_t &config, const std::string &display_name) {
  if (display_base_t::init(config, display_name)) {
    return -1;
  }
  if (dup.init(this, config)) {
    return -1;
  }
  // The staging texture is (re)created lazily on the first snapshot.
  texture.reset();
  return 0;
}
/**
 * @brief Get the next frame from the Windows.Graphics.Capture API and copy it into a new snapshot texture.
 * @param pull_free_image_cb call this to get a new free image from the video subsystem.
 * @param img_out the captured frame is returned here
 * @param timeout how long to wait for the next frame
 * @param cursor_visible whether to capture the cursor
 * @return capture status; reinit is requested when size/format changed under us.
 */
capture_e
display_wgc_ram_t::snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) {
HRESULT status;
texture2d_t src;
uint64_t frame_qpc;
dup.set_cursor_visible(cursor_visible);
auto capture_status = dup.next_frame(timeout, &src, frame_qpc);
if (capture_status != capture_e::ok)
return capture_status;
// Convert the frame's QPC tick timestamp to a steady_clock time point.
auto frame_timestamp = std::chrono::steady_clock::now() - qpc_time_difference(qpc_counter(), frame_qpc);
D3D11_TEXTURE2D_DESC desc;
src->GetDesc(&desc);
// Create the staging texture if it doesn't exist. It should match the source in size and format.
if (texture == nullptr) {
capture_format = desc.Format;
BOOST_LOG(info) << "Capture format ["sv << dxgi_format_to_string(capture_format) << ']';
D3D11_TEXTURE2D_DESC t {};
t.Width = width;
t.Height = height;
t.MipLevels = 1;
t.ArraySize = 1;
t.SampleDesc.Count = 1;
t.Usage = D3D11_USAGE_STAGING;
t.Format = capture_format;
t.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
auto status = device->CreateTexture2D(&t, nullptr, &texture);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to create staging texture [0x"sv << util::hex(status).to_string_view() << ']';
return capture_e::error;
}
}
// It's possible for our display enumeration to race with mode changes and result in
// mismatched image pool and desktop texture sizes. If this happens, just reinit again.
if (desc.Width != width || desc.Height != height) {
BOOST_LOG(info) << "Capture size changed ["sv << width << 'x' << height << " -> "sv << desc.Width << 'x' << desc.Height << ']';
return capture_e::reinit;
}
// It's also possible for the capture format to change on the fly. If that happens,
// reinitialize capture to try format detection again and create new images.
if (capture_format != desc.Format) {
BOOST_LOG(info) << "Capture format changed ["sv << dxgi_format_to_string(capture_format) << " -> "sv << dxgi_format_to_string(desc.Format) << ']';
return capture_e::reinit;
}
// Copy from GPU to CPU
device_ctx->CopyResource(texture.get(), src.get());
if (!pull_free_image_cb(img_out)) {
return capture_e::interrupted;
}
auto img = (img_t *) img_out.get();
// Map the staging texture for CPU access (making it inaccessible for the GPU)
if (FAILED(status = device_ctx->Map(texture.get(), 0, D3D11_MAP_READ, 0, &img_info))) {
BOOST_LOG(error) << "Failed to map texture [0x"sv << util::hex(status).to_string_view() << ']';
return capture_e::error;
}
// Now that we know the capture format, we can finish creating the image
if (complete_img(img, false)) {
device_ctx->Unmap(texture.get(), 0);
img_info.pData = nullptr;
return capture_e::error;
}
// Bulk copy of the mapped rows into the image buffer.
// NOTE(review): assumes img->data is at least height * RowPitch bytes —
// presumably guaranteed by complete_img; confirm against its implementation.
std::copy_n((std::uint8_t *) img_info.pData, height * img_info.RowPitch, (std::uint8_t *) img->data);
// Unmap the staging texture to allow GPU access again
device_ctx->Unmap(texture.get(), 0);
img_info.pData = nullptr;
if (img) {
img->frame_timestamp = frame_timestamp;
}
return capture_e::ok;
}
// Forward snapshot release to the duplicator, which closes the consumed frame.
capture_e
display_wgc_ram_t::release_snapshot() {
return dup.release_frame();
}
} // namespace platf::dxgi
| 13,075
|
C++
|
.cpp
| 295
| 38.125424
| 216
| 0.666247
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,020
|
display_vram.cpp
|
LizardByte_Sunshine/src/platform/windows/display_vram.cpp
|
/**
* @file src/platform/windows/display_vram.cpp
* @brief Definitions for handling video ram.
*/
#include <cmath>
#include <d3dcompiler.h>
#include <directxmath.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext_d3d11va.h>
}
#include "display.h"
#include "misc.h"
#include "src/config.h"
#include "src/logging.h"
#include "src/nvenc/nvenc_config.h"
#include "src/nvenc/nvenc_d3d11_native.h"
#include "src/nvenc/nvenc_d3d11_on_cuda.h"
#include "src/nvenc/nvenc_utils.h"
#include "src/video.h"
#include <AMF/core/Factory.h>
#include <boost/algorithm/string/predicate.hpp>
#if !defined(SUNSHINE_SHADERS_DIR) // for testing this needs to be defined in cmake as we don't do an install
#define SUNSHINE_SHADERS_DIR SUNSHINE_ASSETS_DIR "/shaders/directx"
#endif
namespace platf {
// Pull in std literal operators (""sv, ""ms, ...) for this namespace.
using namespace std::literals;
}
// Deleter for frame_t: releases an AVFrame allocated by libav.
static void
free_frame(AVFrame *frame) {
av_frame_free(&frame);
}
// RAII owner for AVFrame; frees via free_frame on destruction.
using frame_t = util::safe_ptr<AVFrame, free_frame>;
namespace platf::dxgi {
// Create an immutable D3D11 constant buffer initialized from `t`.
// Returns nullptr (after logging) if creation fails.
template <class T>
buf_t
make_buffer(device_t::pointer device, const T &t) {
  static_assert(sizeof(T) % 16 == 0, "Buffer needs to be aligned on a 16-byte alignment");
  D3D11_BUFFER_DESC desc {
    sizeof(T),
    D3D11_USAGE_IMMUTABLE,
    D3D11_BIND_CONSTANT_BUFFER
  };
  D3D11_SUBRESOURCE_DATA initial {
    &t
  };
  buf_t::pointer created;
  auto status = device->CreateBuffer(&desc, &initial, &created);
  if (status) {
    BOOST_LOG(error) << "Failed to create buffer: [0x"sv << util::hex(status).to_string_view() << ']';
    return nullptr;
  }
  return buf_t { created };
}
// Build a blend state for render target 0.
// enable=false -> blending off; enable=true -> alpha blending, or color
// inversion when invert is set. Returns nullptr (after logging) on failure.
blend_t
make_blend(device_t::pointer device, bool enable, bool invert) {
  D3D11_BLEND_DESC desc {};
  auto &target = desc.RenderTarget[0];
  target.BlendEnable = enable;
  target.RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
  if (enable) {
    target.BlendOp = D3D11_BLEND_OP_ADD;
    target.BlendOpAlpha = D3D11_BLEND_OP_ADD;
    if (invert) {
      // Invert colors
      target.SrcBlend = D3D11_BLEND_INV_DEST_COLOR;
      target.DestBlend = D3D11_BLEND_INV_SRC_COLOR;
    }
    else {
      // Regular alpha blending
      target.SrcBlend = D3D11_BLEND_SRC_ALPHA;
      target.DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
    }
    target.SrcBlendAlpha = D3D11_BLEND_ZERO;
    target.DestBlendAlpha = D3D11_BLEND_ZERO;
  }
  blend_t state;
  auto status = device->CreateBlendState(&desc, &state);
  if (status) {
    BOOST_LOG(error) << "Failed to create blend state: [0x"sv << util::hex(status).to_string_view() << ']';
    return nullptr;
  }
  return state;
}
// Compiled HLSL shader bytecode blobs, keyed by conversion path.
// Naming: ps = pixel shader, vs = vertex shader; "linear" / "perceptual_quantizer"
// select the transfer function variant; "type0s" is the downscaling UV variant.
// NOTE(review): these appear to be populated at startup by shader compilation
// code outside this chunk — confirm against the file's init path.
// YUV 4:2:0 UV-plane shaders:
blob_t convert_yuv420_packed_uv_type0_ps_hlsl;
blob_t convert_yuv420_packed_uv_type0_ps_linear_hlsl;
blob_t convert_yuv420_packed_uv_type0_ps_perceptual_quantizer_hlsl;
blob_t convert_yuv420_packed_uv_type0_vs_hlsl;
blob_t convert_yuv420_packed_uv_type0s_ps_hlsl;
blob_t convert_yuv420_packed_uv_type0s_ps_linear_hlsl;
blob_t convert_yuv420_packed_uv_type0s_ps_perceptual_quantizer_hlsl;
blob_t convert_yuv420_packed_uv_type0s_vs_hlsl;
// YUV 4:2:0 Y-plane shaders:
blob_t convert_yuv420_planar_y_ps_hlsl;
blob_t convert_yuv420_planar_y_ps_linear_hlsl;
blob_t convert_yuv420_planar_y_ps_perceptual_quantizer_hlsl;
blob_t convert_yuv420_planar_y_vs_hlsl;
// YUV 4:4:4 shaders (AYUV, planar, Y410):
blob_t convert_yuv444_packed_ayuv_ps_hlsl;
blob_t convert_yuv444_packed_ayuv_ps_linear_hlsl;
blob_t convert_yuv444_packed_vs_hlsl;
blob_t convert_yuv444_planar_ps_hlsl;
blob_t convert_yuv444_planar_ps_linear_hlsl;
blob_t convert_yuv444_planar_ps_perceptual_quantizer_hlsl;
blob_t convert_yuv444_packed_y410_ps_hlsl;
blob_t convert_yuv444_packed_y410_ps_linear_hlsl;
blob_t convert_yuv444_packed_y410_ps_perceptual_quantizer_hlsl;
blob_t convert_yuv444_planar_vs_hlsl;
// Cursor overlay shaders:
blob_t cursor_ps_hlsl;
blob_t cursor_ps_normalize_white_hlsl;
blob_t cursor_vs_hlsl;
// GPU-backed image: a captured desktop frame plus the D3D objects needed to
// render into it and share it with the encoder device.
struct img_d3d_t: public platf::img_t {
// These objects are owned by the display_t's ID3D11Device
texture2d_t capture_texture;
render_target_t capture_rt;
keyed_mutex_t capture_mutex;
// This is the shared handle used by hwdevice_t to open capture_texture
HANDLE encoder_texture_handle = {};
// Set to true if the image corresponds to a dummy texture used prior to
// the first successful capture of a desktop frame
bool dummy = false;
// Set to true if the image is blank (contains no content at all, including a cursor)
bool blank = true;
// Unique identifier for this image
uint32_t id = 0;
// DXGI format of this image texture
DXGI_FORMAT format;
// Close the shared handle; the D3D objects release via their smart pointers.
virtual ~img_d3d_t() override {
if (encoder_texture_handle) {
CloseHandle(encoder_texture_handle);
}
};
};
// RAII helper for an IDXGIKeyedMutex: holds its own COM reference and, if
// lock() succeeded, releases the keyed mutex (key 0) on destruction.
// Movable but not copyable.
struct texture_lock_helper {
  keyed_mutex_t _mutex;
  bool _locked = false;
  texture_lock_helper(const texture_lock_helper &) = delete;
  texture_lock_helper &
  operator=(const texture_lock_helper &) = delete;
  // Moves are noexcept so standard containers relocate instead of copying.
  texture_lock_helper(texture_lock_helper &&other) noexcept {
    _mutex.reset(other._mutex.release());
    _locked = other._locked;
    other._locked = false;
  }
  texture_lock_helper &
  operator=(texture_lock_helper &&other) noexcept {
    // Guard against self-move: without it we would release the sync and then
    // shuffle our own state.
    if (this != &other) {
      if (_locked) _mutex->ReleaseSync(0);
      _mutex.reset(other._mutex.release());
      _locked = other._locked;
      other._locked = false;
    }
    return *this;
  }
  // Takes a non-owning pointer and adds our own COM reference to it.
  texture_lock_helper(IDXGIKeyedMutex *mutex):
    _mutex(mutex) {
    if (_mutex) _mutex->AddRef();
  }
  ~texture_lock_helper() {
    if (_locked) _mutex->ReleaseSync(0);
  }
  // Acquire the keyed mutex (key 0), blocking indefinitely.
  // Returns true if the mutex is held after the call; idempotent when held.
  bool
  lock() {
    if (_locked) return true;
    HRESULT status = _mutex->AcquireSync(0, INFINITE);
    if (status == S_OK) {
      _locked = true;
    }
    else {
      BOOST_LOG(error) << "Failed to acquire texture mutex [0x"sv << util::hex(status).to_string_view() << ']';
    }
    return _locked;
  }
};
// Build the XOR-blended layer of the cursor image (the part drawn by
// inverting the screen beneath it). Returns an empty buffer when the shape
// type needs no XOR pass. Monochrome shapes store the AND mask followed by
// the XOR mask, stacked vertically — hence Height /= 2 below.
util::buffer_t<std::uint8_t>
make_cursor_xor_image(const util::buffer_t<std::uint8_t> &img_data, DXGI_OUTDUPL_POINTER_SHAPE_INFO shape_info) {
constexpr std::uint32_t inverted = 0xFFFFFFFF;
constexpr std::uint32_t transparent = 0;
switch (shape_info.Type) {
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR:
// This type doesn't require any XOR-blending
return {};
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR: {
util::buffer_t<std::uint8_t> cursor_img = img_data;
std::for_each((std::uint32_t *) std::begin(cursor_img), (std::uint32_t *) std::end(cursor_img), [](auto &pixel) {
auto alpha = (std::uint8_t)((pixel >> 24) & 0xFF);
if (alpha == 0xFF) {
// Pixels with 0xFF alpha will be XOR-blended as is.
}
else if (alpha == 0x00) {
// Pixels with 0x00 alpha will be blended by make_cursor_alpha_image().
// We make them transparent for the XOR-blended cursor image.
pixel = transparent;
}
else {
// Other alpha values are illegal in masked color cursors
BOOST_LOG(warning) << "Illegal alpha value in masked color cursor: " << alpha;
}
});
return cursor_img;
}
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME:
// Monochrome is handled below
break;
default:
BOOST_LOG(error) << "Invalid cursor shape type: " << shape_info.Type;
return {};
}
// Top half is the AND mask, bottom half is the XOR mask.
shape_info.Height /= 2;
util::buffer_t<std::uint8_t> cursor_img { shape_info.Width * shape_info.Height * 4 };
auto bytes = shape_info.Pitch * shape_info.Height;
auto pixel_begin = (std::uint32_t *) std::begin(cursor_img);
auto pixel_data = pixel_begin;
auto and_mask = std::begin(img_data);
auto xor_mask = std::begin(img_data) + bytes;
// Expand each mask bit into a 32-bit BGRA pixel; the inner guard stops at
// the image edge so row-pitch padding bits are skipped.
for (auto x = 0; x < bytes; ++x) {
for (auto c = 7; c >= 0 && ((std::uint8_t *) pixel_data) != std::end(cursor_img); --c) {
auto bit = 1 << c;
auto color_type = ((*and_mask & bit) ? 1 : 0) + ((*xor_mask & bit) ? 2 : 0);
switch (color_type) {
case 0: // Opaque black (handled by alpha-blending)
case 2: // Opaque white (handled by alpha-blending)
case 1: // Color of screen (transparent)
*pixel_data = transparent;
break;
case 3: // Inverse of screen
*pixel_data = inverted;
break;
}
++pixel_data;
}
++and_mask;
++xor_mask;
}
return cursor_img;
}
// Build the alpha-blended layer of the cursor image (everything except the
// screen-inverting pixels, which make_cursor_xor_image() handles).
// Monochrome shapes store the AND mask followed by the XOR mask, stacked
// vertically — hence Height /= 2 below.
util::buffer_t<std::uint8_t>
make_cursor_alpha_image(const util::buffer_t<std::uint8_t> &img_data, DXGI_OUTDUPL_POINTER_SHAPE_INFO shape_info) {
constexpr std::uint32_t black = 0xFF000000;
constexpr std::uint32_t white = 0xFFFFFFFF;
constexpr std::uint32_t transparent = 0;
switch (shape_info.Type) {
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR: {
util::buffer_t<std::uint8_t> cursor_img = img_data;
std::for_each((std::uint32_t *) std::begin(cursor_img), (std::uint32_t *) std::end(cursor_img), [](auto &pixel) {
auto alpha = (std::uint8_t)((pixel >> 24) & 0xFF);
if (alpha == 0xFF) {
// Pixels with 0xFF alpha will be XOR-blended by make_cursor_xor_image().
// We make them transparent for the alpha-blended cursor image.
pixel = transparent;
}
else if (alpha == 0x00) {
// Pixels with 0x00 alpha will be blended as opaque with the alpha-blended image.
pixel |= 0xFF000000;
}
else {
// Other alpha values are illegal in masked color cursors
BOOST_LOG(warning) << "Illegal alpha value in masked color cursor: " << alpha;
}
});
return cursor_img;
}
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR:
// Color cursors are just an ARGB bitmap which requires no processing.
return img_data;
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME:
// Monochrome cursors are handled below.
break;
default:
BOOST_LOG(error) << "Invalid cursor shape type: " << shape_info.Type;
return {};
}
// Top half is the AND mask, bottom half is the XOR mask.
shape_info.Height /= 2;
util::buffer_t<std::uint8_t> cursor_img { shape_info.Width * shape_info.Height * 4 };
auto bytes = shape_info.Pitch * shape_info.Height;
auto pixel_begin = (std::uint32_t *) std::begin(cursor_img);
auto pixel_data = pixel_begin;
auto and_mask = std::begin(img_data);
auto xor_mask = std::begin(img_data) + bytes;
// Expand each mask bit into a 32-bit BGRA pixel; the inner guard stops at
// the image edge so row-pitch padding bits are skipped.
for (auto x = 0; x < bytes; ++x) {
for (auto c = 7; c >= 0 && ((std::uint8_t *) pixel_data) != std::end(cursor_img); --c) {
auto bit = 1 << c;
auto color_type = ((*and_mask & bit) ? 1 : 0) + ((*xor_mask & bit) ? 2 : 0);
switch (color_type) {
case 0: // Opaque black
*pixel_data = black;
break;
case 2: // Opaque white
*pixel_data = white;
break;
case 3: // Inverse of screen (handled by XOR blending)
case 1: // Color of screen (transparent)
*pixel_data = transparent;
break;
}
++pixel_data;
}
++and_mask;
++xor_mask;
}
return cursor_img;
}
// Compile an HLSL shader from a file on disk.
// Returns the compiled bytecode blob, or nullptr on failure (after logging
// any compiler diagnostics).
blob_t
compile_shader(LPCSTR file, LPCSTR entrypoint, LPCSTR shader_model) {
  DWORD flags = D3DCOMPILE_ENABLE_STRICTNESS;
#ifndef NDEBUG
  // Debug builds keep symbols and skip optimization for easier debugging.
  flags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
#endif
  blob_t::pointer compiled_p;
  blob_t::pointer msg_p = nullptr;
  auto wFile = from_utf8(file);
  auto status = D3DCompileFromFile(wFile.c_str(), nullptr, D3D_COMPILE_STANDARD_FILE_INCLUDE, entrypoint, shader_model, flags, 0, &compiled_p, &msg_p);
  if (msg_p) {
    // Diagnostics blob is NUL-terminated; drop the terminator for logging.
    BOOST_LOG(warning) << std::string_view { (const char *) msg_p->GetBufferPointer(), msg_p->GetBufferSize() - 1 };
    msg_p->Release();
  }
  if (status) {
    BOOST_LOG(error) << "Couldn't compile ["sv << file << "] [0x"sv << util::hex(status).to_string_view() << ']';
    return nullptr;
  }
  return blob_t { compiled_p };
}
// Compile a pixel shader (entry point "main_ps", shader model 5.0).
blob_t
compile_pixel_shader(LPCSTR file) {
return compile_shader(file, "main_ps", "ps_5_0");
}
// Compile a vertex shader (entry point "main_vs", shader model 5.0).
blob_t
compile_vertex_shader(LPCSTR file) {
return compile_shader(file, "main_vs", "vs_5_0");
}
class d3d_base_encode_device final {
public:
// Convert a captured RGB(A) image into the encoder's YUV output texture by
// drawing fullscreen triangles with the format-specific conversion shaders.
// Returns 0 on success, -1 on failure. Blank images are skipped entirely.
int
convert(platf::img_t &img_base) {
// Garbage collect mapped capture images whose weak references have expired
for (auto it = img_ctx_map.begin(); it != img_ctx_map.end();) {
if (it->second.img_weak.expired()) {
it = img_ctx_map.erase(it);
}
else {
it++;
}
}
auto &img = (img_d3d_t &) img_base;
if (!img.blank) {
auto &img_ctx = img_ctx_map[img.id];
// Open the shared capture texture with our ID3D11Device
if (initialize_image_context(img, img_ctx)) {
return -1;
}
// Acquire encoder mutex to synchronize with capture code
auto status = img_ctx.encoder_mutex->AcquireSync(0, INFINITE);
if (status != S_OK) {
BOOST_LOG(error) << "Failed to acquire encoder mutex [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
// Shared draw routine: renders the Y (or combined YUV) planes, then the UV
// plane when the output format is semi-planar.
auto draw = [&](auto &input, auto &y_or_yuv_viewports, auto &uv_viewport) {
device_ctx->PSSetShaderResources(0, 1, &input);
// Draw Y/YUV
device_ctx->OMSetRenderTargets(1, &out_Y_or_YUV_rtv, nullptr);
device_ctx->VSSetShader(convert_Y_or_YUV_vs.get(), nullptr, 0);
device_ctx->PSSetShader(img.format == DXGI_FORMAT_R16G16B16A16_FLOAT ? convert_Y_or_YUV_fp16_ps.get() : convert_Y_or_YUV_ps.get(), nullptr, 0);
auto viewport_count = (format == DXGI_FORMAT_R16_UINT) ? 3 : 1;
assert(viewport_count <= y_or_yuv_viewports.size());
device_ctx->RSSetViewports(viewport_count, y_or_yuv_viewports.data());
device_ctx->Draw(3 * viewport_count, 0); // vertex shader will spread vertices across viewports
// Draw UV if needed
if (out_UV_rtv) {
assert(format == DXGI_FORMAT_NV12 || format == DXGI_FORMAT_P010);
device_ctx->OMSetRenderTargets(1, &out_UV_rtv, nullptr);
device_ctx->VSSetShader(convert_UV_vs.get(), nullptr, 0);
device_ctx->PSSetShader(img.format == DXGI_FORMAT_R16G16B16A16_FLOAT ? convert_UV_fp16_ps.get() : convert_UV_ps.get(), nullptr, 0);
device_ctx->RSSetViewports(1, &uv_viewport);
device_ctx->Draw(3, 0);
}
};
// Clear render target view(s) once so that the aspect ratio mismatch "bars" appear black
if (!rtvs_cleared) {
auto black = create_black_texture_for_rtv_clear();
if (black) draw(black, out_Y_or_YUV_viewports_for_clear, out_UV_viewport_for_clear);
rtvs_cleared = true;
}
// Draw captured frame
draw(img_ctx.encoder_input_res, out_Y_or_YUV_viewports, out_UV_viewport);
// Release encoder mutex to allow capture code to reuse this image
img_ctx.encoder_mutex->ReleaseSync(0);
// Unbind the input SRV so the capture side can reuse the texture freely.
ID3D11ShaderResourceView *emptyShaderResourceView = nullptr;
device_ctx->PSSetShaderResources(0, 1, &emptyShaderResourceView);
}
return 0;
}
// Upload the color-conversion constants for the requested colorspace to both
// shader stages (VS slot 3, PS slot 0) and keep the buffer alive in
// this->color_matrix. Logs and returns early on failure.
void
apply_colorspace(const ::video::sunshine_colorspace_t &colorspace) {
  // YUV 4:4:4 output formats use the newer conversion matrices.
  const bool wants_new_vectors =
    format == DXGI_FORMAT_AYUV ||
    format == DXGI_FORMAT_R16_UINT ||
    format == DXGI_FORMAT_Y410;
  auto vectors = ::video::color_vectors_from_colorspace(colorspace);
  if (wants_new_vectors) {
    vectors = ::video::new_color_vectors_from_colorspace(colorspace);
  }
  if (!vectors) {
    BOOST_LOG(error) << "No vector data for colorspace"sv;
    return;
  }
  auto buffer = make_buffer(device.get(), *vectors);
  if (!buffer) {
    BOOST_LOG(warning) << "Failed to create color matrix"sv;
    return;
  }
  device_ctx->VSSetConstantBuffers(3, 1, &buffer);
  device_ctx->PSSetConstantBuffers(0, 1, &buffer);
  this->color_matrix = std::move(buffer);
}
// Prepare the encoder output pipeline for a frame texture of the given size:
// selects and creates the conversion shaders for the output format, computes
// aspect-ratio-preserving viewports, uploads constant buffers, and creates
// (and optionally clears) the render target views.
// Returns 0 on success, -1 on failure.
int
init_output(ID3D11Texture2D *frame_texture, int width, int height) {
// The underlying frame pool owns the texture, so we must reference it for ourselves
frame_texture->AddRef();
output_texture.reset(frame_texture);
HRESULT status = S_OK;
#define create_vertex_shader_helper(x, y) \
if (FAILED(status = device->CreateVertexShader(x->GetBufferPointer(), x->GetBufferSize(), nullptr, &y))) { \
BOOST_LOG(error) << "Failed to create vertex shader " << #x << ": " << util::log_hex(status); \
return -1; \
}
#define create_pixel_shader_helper(x, y) \
if (FAILED(status = device->CreatePixelShader(x->GetBufferPointer(), x->GetBufferSize(), nullptr, &y))) { \
BOOST_LOG(error) << "Failed to create pixel shader " << #x << ": " << util::log_hex(status); \
return -1; \
}
// Downscaling selects the "type0s" UV shader variants.
const bool downscaling = display->width > width || display->height > height;
// Instantiate the shader set matching the encoder surface format.
switch (format) {
case DXGI_FORMAT_NV12:
// Semi-planar 8-bit YUV 4:2:0
create_vertex_shader_helper(convert_yuv420_planar_y_vs_hlsl, convert_Y_or_YUV_vs);
create_pixel_shader_helper(convert_yuv420_planar_y_ps_hlsl, convert_Y_or_YUV_ps);
create_pixel_shader_helper(convert_yuv420_planar_y_ps_linear_hlsl, convert_Y_or_YUV_fp16_ps);
if (downscaling) {
create_vertex_shader_helper(convert_yuv420_packed_uv_type0s_vs_hlsl, convert_UV_vs);
create_pixel_shader_helper(convert_yuv420_packed_uv_type0s_ps_hlsl, convert_UV_ps);
create_pixel_shader_helper(convert_yuv420_packed_uv_type0s_ps_linear_hlsl, convert_UV_fp16_ps);
}
else {
create_vertex_shader_helper(convert_yuv420_packed_uv_type0_vs_hlsl, convert_UV_vs);
create_pixel_shader_helper(convert_yuv420_packed_uv_type0_ps_hlsl, convert_UV_ps);
create_pixel_shader_helper(convert_yuv420_packed_uv_type0_ps_linear_hlsl, convert_UV_fp16_ps);
}
break;
case DXGI_FORMAT_P010:
// Semi-planar 16-bit YUV 4:2:0, 10 most significant bits store the value
create_vertex_shader_helper(convert_yuv420_planar_y_vs_hlsl, convert_Y_or_YUV_vs);
create_pixel_shader_helper(convert_yuv420_planar_y_ps_hlsl, convert_Y_or_YUV_ps);
if (display->is_hdr()) {
create_pixel_shader_helper(convert_yuv420_planar_y_ps_perceptual_quantizer_hlsl, convert_Y_or_YUV_fp16_ps);
}
else {
create_pixel_shader_helper(convert_yuv420_planar_y_ps_linear_hlsl, convert_Y_or_YUV_fp16_ps);
}
if (downscaling) {
create_vertex_shader_helper(convert_yuv420_packed_uv_type0s_vs_hlsl, convert_UV_vs);
create_pixel_shader_helper(convert_yuv420_packed_uv_type0s_ps_hlsl, convert_UV_ps);
if (display->is_hdr()) {
create_pixel_shader_helper(convert_yuv420_packed_uv_type0s_ps_perceptual_quantizer_hlsl, convert_UV_fp16_ps);
}
else {
create_pixel_shader_helper(convert_yuv420_packed_uv_type0s_ps_linear_hlsl, convert_UV_fp16_ps);
}
}
else {
create_vertex_shader_helper(convert_yuv420_packed_uv_type0_vs_hlsl, convert_UV_vs);
create_pixel_shader_helper(convert_yuv420_packed_uv_type0_ps_hlsl, convert_UV_ps);
if (display->is_hdr()) {
create_pixel_shader_helper(convert_yuv420_packed_uv_type0_ps_perceptual_quantizer_hlsl, convert_UV_fp16_ps);
}
else {
create_pixel_shader_helper(convert_yuv420_packed_uv_type0_ps_linear_hlsl, convert_UV_fp16_ps);
}
}
break;
case DXGI_FORMAT_R16_UINT:
// Planar 16-bit YUV 4:4:4, 10 most significant bits store the value
create_vertex_shader_helper(convert_yuv444_planar_vs_hlsl, convert_Y_or_YUV_vs);
create_pixel_shader_helper(convert_yuv444_planar_ps_hlsl, convert_Y_or_YUV_ps);
if (display->is_hdr()) {
create_pixel_shader_helper(convert_yuv444_planar_ps_perceptual_quantizer_hlsl, convert_Y_or_YUV_fp16_ps);
}
else {
create_pixel_shader_helper(convert_yuv444_planar_ps_linear_hlsl, convert_Y_or_YUV_fp16_ps);
}
break;
case DXGI_FORMAT_AYUV:
// Packed 8-bit YUV 4:4:4
create_vertex_shader_helper(convert_yuv444_packed_vs_hlsl, convert_Y_or_YUV_vs);
create_pixel_shader_helper(convert_yuv444_packed_ayuv_ps_hlsl, convert_Y_or_YUV_ps);
create_pixel_shader_helper(convert_yuv444_packed_ayuv_ps_linear_hlsl, convert_Y_or_YUV_fp16_ps);
break;
case DXGI_FORMAT_Y410:
// Packed 10-bit YUV 4:4:4
create_vertex_shader_helper(convert_yuv444_packed_vs_hlsl, convert_Y_or_YUV_vs);
create_pixel_shader_helper(convert_yuv444_packed_y410_ps_hlsl, convert_Y_or_YUV_ps);
if (display->is_hdr()) {
create_pixel_shader_helper(convert_yuv444_packed_y410_ps_perceptual_quantizer_hlsl, convert_Y_or_YUV_fp16_ps);
}
else {
create_pixel_shader_helper(convert_yuv444_packed_y410_ps_linear_hlsl, convert_Y_or_YUV_fp16_ps);
}
break;
default:
BOOST_LOG(error) << "Unable to create shaders because of the unrecognized surface format";
return -1;
}
#undef create_vertex_shader_helper
#undef create_pixel_shader_helper
auto out_width = width;
auto out_height = height;
float in_width = display->width;
float in_height = display->height;
// Ensure aspect ratio is maintained
auto scalar = std::fminf(out_width / in_width, out_height / in_height);
auto out_width_f = in_width * scalar;
auto out_height_f = in_height * scalar;
// result is always positive
auto offsetX = (out_width - out_width_f) / 2;
auto offsetY = (out_height - out_height_f) / 2;
// Planar 4:4:4 stacks three full-size planes vertically; the "for_clear"
// variants cover the entire plane including the letterbox bars.
out_Y_or_YUV_viewports[0] = { offsetX, offsetY, out_width_f, out_height_f, 0.0f, 1.0f }; // Y plane
out_Y_or_YUV_viewports[1] = out_Y_or_YUV_viewports[0]; // U plane
out_Y_or_YUV_viewports[1].TopLeftY += out_height;
out_Y_or_YUV_viewports[2] = out_Y_or_YUV_viewports[1]; // V plane
out_Y_or_YUV_viewports[2].TopLeftY += out_height;
out_Y_or_YUV_viewports_for_clear[0] = { 0, 0, (float) out_width, (float) out_height, 0.0f, 1.0f }; // Y plane
out_Y_or_YUV_viewports_for_clear[1] = out_Y_or_YUV_viewports_for_clear[0]; // U plane
out_Y_or_YUV_viewports_for_clear[1].TopLeftY += out_height;
out_Y_or_YUV_viewports_for_clear[2] = out_Y_or_YUV_viewports_for_clear[1]; // V plane
out_Y_or_YUV_viewports_for_clear[2].TopLeftY += out_height;
// UV plane of semi-planar formats is half resolution in both dimensions.
out_UV_viewport = { offsetX / 2, offsetY / 2, out_width_f / 2, out_height_f / 2, 0.0f, 1.0f };
out_UV_viewport_for_clear = { 0, 0, (float) out_width / 2, (float) out_height / 2, 0.0f, 1.0f };
float subsample_offset_in[16 / sizeof(float)] { 1.0f / (float) out_width_f, 1.0f / (float) out_height_f }; // aligned to 16-byte
subsample_offset = make_buffer(device.get(), subsample_offset_in);
if (!subsample_offset) {
BOOST_LOG(error) << "Failed to create subsample offset vertex constant buffer";
return -1;
}
device_ctx->VSSetConstantBuffers(0, 1, &subsample_offset);
{
// Rotation constant: 0 for unrotated displays, otherwise the DXGI rotation
// converted to a quarter-turn count (negated for the vertex shader).
int32_t rotation_modifier = display->display_rotation == DXGI_MODE_ROTATION_UNSPECIFIED ? 0 : display->display_rotation - 1;
int32_t rotation_data[16 / sizeof(int32_t)] { -rotation_modifier }; // aligned to 16-byte
auto rotation = make_buffer(device.get(), rotation_data);
if (!rotation) {
BOOST_LOG(error) << "Failed to create display rotation vertex constant buffer";
return -1;
}
device_ctx->VSSetConstantBuffers(1, 1, &rotation);
}
// Pick the RTV formats for the output texture; formats with a simple
// channel layout can be cleared directly with ClearRenderTargetView().
DXGI_FORMAT rtv_Y_or_YUV_format = DXGI_FORMAT_UNKNOWN;
DXGI_FORMAT rtv_UV_format = DXGI_FORMAT_UNKNOWN;
bool rtv_simple_clear = false;
switch (format) {
case DXGI_FORMAT_NV12:
rtv_Y_or_YUV_format = DXGI_FORMAT_R8_UNORM;
rtv_UV_format = DXGI_FORMAT_R8G8_UNORM;
rtv_simple_clear = true;
break;
case DXGI_FORMAT_P010:
rtv_Y_or_YUV_format = DXGI_FORMAT_R16_UNORM;
rtv_UV_format = DXGI_FORMAT_R16G16_UNORM;
rtv_simple_clear = true;
break;
case DXGI_FORMAT_AYUV:
rtv_Y_or_YUV_format = DXGI_FORMAT_R8G8B8A8_UINT;
break;
case DXGI_FORMAT_R16_UINT:
rtv_Y_or_YUV_format = DXGI_FORMAT_R16_UINT;
break;
case DXGI_FORMAT_Y410:
rtv_Y_or_YUV_format = DXGI_FORMAT_R10G10B10A2_UINT;
break;
default:
BOOST_LOG(error) << "Unable to create render target views because of the unrecognized surface format";
return -1;
}
auto create_rtv = [&](auto &rt, DXGI_FORMAT rt_format) -> bool {
D3D11_RENDER_TARGET_VIEW_DESC rtv_desc = {};
rtv_desc.Format = rt_format;
rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
auto status = device->CreateRenderTargetView(output_texture.get(), &rtv_desc, &rt);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to create render target view: " << util::log_hex(status);
return false;
}
return true;
};
// Create Y/YUV render target view
if (!create_rtv(out_Y_or_YUV_rtv, rtv_Y_or_YUV_format)) return -1;
// Create UV render target view if needed
if (rtv_UV_format != DXGI_FORMAT_UNKNOWN && !create_rtv(out_UV_rtv, rtv_UV_format)) return -1;
if (rtv_simple_clear) {
// Clear the RTVs to ensure the aspect ratio padding is black
const float y_black[] = { 0.0f, 0.0f, 0.0f, 0.0f };
device_ctx->ClearRenderTargetView(out_Y_or_YUV_rtv.get(), y_black);
if (out_UV_rtv) {
const float uv_black[] = { 0.5f, 0.5f, 0.5f, 0.5f };
device_ctx->ClearRenderTargetView(out_UV_rtv.get(), uv_black);
}
rtvs_cleared = true;
}
else {
// Can't use ClearRenderTargetView(), will clear on first convert()
rtvs_cleared = false;
}
return 0;
}
// Create and configure the encoder-side D3D11 device: maps the requested
// pixel format to a DXGI surface format, creates the device on the given
// adapter, raises GPU thread priority, uploads default (Rec. 601) color
// vectors, and sets up fixed pipeline state (blend, sampler, topology).
// Returns 0 on success, -1 on failure.
int
init(std::shared_ptr<platf::display_t> display, adapter_t::pointer adapter_p, pix_fmt_e pix_fmt) {
switch (pix_fmt) {
case pix_fmt_e::nv12:
format = DXGI_FORMAT_NV12;
break;
case pix_fmt_e::p010:
format = DXGI_FORMAT_P010;
break;
case pix_fmt_e::ayuv:
format = DXGI_FORMAT_AYUV;
break;
case pix_fmt_e::yuv444p16:
format = DXGI_FORMAT_R16_UINT;
break;
case pix_fmt_e::y410:
format = DXGI_FORMAT_Y410;
break;
default:
BOOST_LOG(error) << "D3D11 backend doesn't support pixel format: " << from_pix_fmt(pix_fmt);
return -1;
}
// Request the highest feature level available, falling back as needed.
D3D_FEATURE_LEVEL featureLevels[] {
D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
D3D_FEATURE_LEVEL_9_3,
D3D_FEATURE_LEVEL_9_2,
D3D_FEATURE_LEVEL_9_1
};
HRESULT status = D3D11CreateDevice(
adapter_p,
D3D_DRIVER_TYPE_UNKNOWN,
nullptr,
D3D11_CREATE_DEVICE_FLAGS,
featureLevels, sizeof(featureLevels) / sizeof(D3D_FEATURE_LEVEL),
D3D11_SDK_VERSION,
&device,
nullptr,
&device_ctx);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to create encoder D3D11 device [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
dxgi::dxgi_t dxgi;
status = device->QueryInterface(IID_IDXGIDevice, (void **) &dxgi);
if (FAILED(status)) {
BOOST_LOG(warning) << "Failed to query DXGI interface from device [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
// Best-effort priority boost; failure is non-fatal.
status = dxgi->SetGPUThreadPriority(7);
if (FAILED(status)) {
BOOST_LOG(warning) << "Failed to increase encoding GPU thread priority. Please run application as administrator for optimal performance.";
}
// Default to Rec. 601 until apply_colorspace() provides the real matrices.
auto default_color_vectors = ::video::color_vectors_from_colorspace(::video::colorspace_e::rec601, false);
if (!default_color_vectors) {
BOOST_LOG(error) << "Missing color vectors for Rec. 601"sv;
return -1;
}
color_matrix = make_buffer(device.get(), *default_color_vectors);
if (!color_matrix) {
BOOST_LOG(error) << "Failed to create color matrix buffer"sv;
return -1;
}
device_ctx->VSSetConstantBuffers(3, 1, &color_matrix);
device_ctx->PSSetConstantBuffers(0, 1, &color_matrix);
this->display = std::dynamic_pointer_cast<display_base_t>(display);
if (!this->display) {
return -1;
}
// Drop the parameter's reference now that the member holds one.
display = nullptr;
blend_disable = make_blend(device.get(), false, false);
if (!blend_disable) {
return -1;
}
D3D11_SAMPLER_DESC sampler_desc {};
sampler_desc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
sampler_desc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
sampler_desc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
sampler_desc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
sampler_desc.ComparisonFunc = D3D11_COMPARISON_NEVER;
sampler_desc.MinLOD = 0;
sampler_desc.MaxLOD = D3D11_FLOAT32_MAX;
// NOTE(review): this creates a LINEAR sampler, but the error message below
// says "point sampler" — the message looks stale; confirm and fix upstream.
status = device->CreateSamplerState(&sampler_desc, &sampler_linear);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to create point sampler state [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
device_ctx->OMSetBlendState(blend_disable.get(), nullptr, 0xFFFFFFFFu);
device_ctx->PSSetSamplers(0, 1, &sampler_linear);
device_ctx->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
return 0;
}
struct encoder_img_ctx_t {
  // Raw pointer of the capture-side texture, kept only as an identity check so we
  // can detect when the underlying texture changes. Never dereferenced by the encoder!
  texture2d_t::const_pointer capture_texture_p;
  texture2d_t encoder_texture;
  shader_res_t encoder_input_res;
  keyed_mutex_t encoder_mutex;
  std::weak_ptr<const platf::img_t> img_weak;
  // Drop everything this context holds so it can be rebound to a different texture.
  void
  reset() {
    img_weak.reset();
    encoder_mutex.reset();
    encoder_input_res.reset();
    encoder_texture.reset();
    capture_texture_p = nullptr;
  }
};
int
initialize_image_context(const img_d3d_t &img, encoder_img_ctx_t &img_ctx) {
// If we've already opened the shared texture, we're done
if (img_ctx.encoder_texture && img.capture_texture.get() == img_ctx.capture_texture_p) {
return 0;
}
// Reset this image context in case it was used before with a different texture.
// Textures can change when transitioning from a dummy image to a real image.
img_ctx.reset();
device1_t device1;
auto status = device->QueryInterface(__uuidof(ID3D11Device1), (void **) &device1);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to query ID3D11Device1 [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
// Open a handle to the shared texture
status = device1->OpenSharedResource1(img.encoder_texture_handle, __uuidof(ID3D11Texture2D), (void **) &img_ctx.encoder_texture);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to open shared image texture [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
// Get the keyed mutex to synchronize with the capture code
status = img_ctx.encoder_texture->QueryInterface(__uuidof(IDXGIKeyedMutex), (void **) &img_ctx.encoder_mutex);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to query IDXGIKeyedMutex [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
// Create the SRV for the encoder texture
status = device->CreateShaderResourceView(img_ctx.encoder_texture.get(), nullptr, &img_ctx.encoder_input_res);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to create shader resource view for encoding [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
img_ctx.capture_texture_p = img.capture_texture.get();
img_ctx.img_weak = img.weak_from_this();
return 0;
}
/**
 * @brief Build a small all-black texture used to clear render targets by drawing.
 * @return A shader resource view over the black texture, or an empty view on failure.
 */
shader_res_t
create_black_texture_for_rtv_clear() {
  // A tiny immutable 32x32 BGRA texture filled with zeroes; it is sampled and
  // stretched over the target when ClearRenderTargetView() cannot be used.
  constexpr auto side = 32;
  const std::vector<uint8_t> pixels(4 * side * side, 0);
  D3D11_TEXTURE2D_DESC desc = {};
  desc.Width = side;
  desc.Height = side;
  desc.MipLevels = 1;
  desc.ArraySize = 1;
  desc.SampleDesc.Count = 1;
  desc.Usage = D3D11_USAGE_IMMUTABLE;
  desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
  desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
  const D3D11_SUBRESOURCE_DATA initial_data = { pixels.data(), 4 * side, 0 };
  texture2d_t texture;
  auto hr = device->CreateTexture2D(&desc, &initial_data, &texture);
  if (FAILED(hr)) {
    BOOST_LOG(error) << "Failed to create black texture: " << util::log_hex(hr);
    return {};
  }
  shader_res_t srv;
  hr = device->CreateShaderResourceView(texture.get(), nullptr, &srv);
  if (FAILED(hr)) {
    BOOST_LOG(error) << "Failed to create black texture resource view: " << util::log_hex(hr);
    return {};
  }
  return srv;
}
// Color conversion constants currently in use — NOTE(review): appears to be set by
// apply_colorspace(); verify against the full class definition.
::video::color_t *color_p;
// Constant buffer with the chroma subsample offset for the UV pass
buf_t subsample_offset;
// Constant buffer holding the active YUV color conversion matrix (bound to VS slot 3 / PS slot 0 in init())
buf_t color_matrix;
// Pass-through blend state bound while converting
blend_t blend_disable;
// Linear sampler bound to PS slot 0 in init()
sampler_state_t sampler_linear;
// Render target views into the output texture: Y (or packed YUV) plane and UV plane
render_target_t out_Y_or_YUV_rtv;
render_target_t out_UV_rtv;
// True once the output RTVs have been cleared (directly or by the first convert())
bool rtvs_cleared = false;
// d3d_img_t::id -> encoder_img_ctx_t
// These store the encoder textures for each img_t that passes through
// convert(). We can't store them in the img_t itself because it is shared
// amongst multiple hwdevice_t objects (and therefore multiple ID3D11Devices).
std::map<uint32_t, encoder_img_ctx_t> img_ctx_map;
// The display this encoder device was created for (set in init())
std::shared_ptr<display_base_t> display;
// Shaders for the Y/YUV and UV conversion passes; the fp16 variants are
// presumably for floating-point capture formats — TODO confirm in convert()
vs_t convert_Y_or_YUV_vs;
ps_t convert_Y_or_YUV_ps;
ps_t convert_Y_or_YUV_fp16_ps;
vs_t convert_UV_vs;
ps_t convert_UV_ps;
ps_t convert_UV_fp16_ps;
// Viewports for rendering and for clearing the aspect-ratio padding
std::array<D3D11_VIEWPORT, 3> out_Y_or_YUV_viewports, out_Y_or_YUV_viewports_for_clear;
D3D11_VIEWPORT out_UV_viewport, out_UV_viewport_for_clear;
// DXGI format of the encoder output texture, chosen from the pixel format in init()
DXGI_FORMAT format;
device_t device;
device_ctx_t device_ctx;
texture2d_t output_texture;
};
// Encode device that feeds converted frames to an FFmpeg (avcodec) hardware encoder
// through a D3D11VA (or derived QSV) hwframes context.
class d3d_avcodec_encode_device_t: public avcodec_encode_device_t {
public:
// Initialize the underlying conversion device and expose its ID3D11Device to avcodec via `data`.
int
init(std::shared_ptr<platf::display_t> display, adapter_t::pointer adapter_p, pix_fmt_e pix_fmt) {
int result = base.init(display, adapter_p, pix_fmt);
data = base.device.get();
return result;
}
int
convert(platf::img_t &img_base) override {
return base.convert(img_base);
}
void
apply_colorspace() override {
base.apply_colorspace(colorspace);
}
// Configure the hwframes pool avcodec will allocate from.
void
init_hwframes(AVHWFramesContext *frames) override {
// We may be called with a QSV or D3D11VA context
if (frames->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
auto d3d11_frames = (AVD3D11VAFramesContext *) frames->hwctx;
// The encoder requires textures with D3D11_BIND_RENDER_TARGET set
d3d11_frames->BindFlags = D3D11_BIND_RENDER_TARGET;
d3d11_frames->MiscFlags = 0;
}
// We require a single texture
frames->initial_pool_size = 1;
}
// Prepare our device before avcodec derives another hwdevice context from it.
int
prepare_to_derive_context(int hw_device_type) override {
// QuickSync requires our device to be multithread-protected
if (hw_device_type == AV_HWDEVICE_TYPE_QSV) {
multithread_t mt;
auto status = base.device->QueryInterface(IID_ID3D11Multithread, (void **) &mt);
if (FAILED(status)) {
BOOST_LOG(warning) << "Failed to query ID3D11Multithread interface from device [0x"sv << util::hex(status).to_string_view() << ']';
return -1;
}
mt->SetMultithreadProtected(TRUE);
}
return 0;
}
// Bind the given AVFrame as the conversion output target.
int
set_frame(AVFrame *frame, AVBufferRef *hw_frames_ctx) override {
this->hwframe.reset(frame);
this->frame = frame;
// Populate this frame with a hardware buffer if one isn't there already
if (!frame->buf[0]) {
auto err = av_hwframe_get_buffer(hw_frames_ctx, frame, 0);
if (err) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Failed to get hwframe buffer: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
return -1;
}
}
// If this is a frame from a derived context, we'll need to map it to D3D11
ID3D11Texture2D *frame_texture;
if (frame->format != AV_PIX_FMT_D3D11) {
frame_t d3d11_frame { av_frame_alloc() };
d3d11_frame->format = AV_PIX_FMT_D3D11;
auto err = av_hwframe_map(d3d11_frame.get(), frame, AV_HWFRAME_MAP_WRITE | AV_HWFRAME_MAP_OVERWRITE);
if (err) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Failed to map D3D11 frame: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
return -1;
}
// Get the texture from the mapped frame
// NOTE(review): d3d11_frame is freed at the end of this scope; the texture is
// presumably kept alive by the hwframes pool referenced by `frame` — verify.
frame_texture = (ID3D11Texture2D *) d3d11_frame->data[0];
}
else {
// Otherwise, we can just use the texture inside the original frame
frame_texture = (ID3D11Texture2D *) frame->data[0];
}
return base.init_output(frame_texture, frame->width, frame->height);
}
private:
d3d_base_encode_device base;
// Owns the AVFrame passed to set_frame()
frame_t hwframe;
};
// Encode device that feeds converted frames directly to NvENC.
class d3d_nvenc_encode_device_t: public nvenc_encode_device_t {
public:
  // Create the conversion device plus the NvENC wrapper matching the pixel format.
  bool
  init_device(std::shared_ptr<platf::display_t> display, adapter_t::pointer adapter_p, pix_fmt_e pix_fmt) {
    buffer_format = nvenc::nvenc_format_from_sunshine_format(pix_fmt);
    if (buffer_format == NV_ENC_BUFFER_FORMAT_UNDEFINED) {
      BOOST_LOG(error) << "Unexpected pixel format for NvENC ["sv << from_pix_fmt(pix_fmt) << ']';
      return false;
    }
    if (base.init(display, adapter_p, pix_fmt)) {
      return false;
    }
    // 16-bit 4:4:4 goes through the D3D11-on-CUDA interop; everything else is native D3D11 input
    if (pix_fmt == pix_fmt_e::yuv444p16) {
      nvenc_d3d = std::make_unique<nvenc::nvenc_d3d11_on_cuda>(base.device.get());
    }
    else {
      nvenc_d3d = std::make_unique<nvenc::nvenc_d3d11_native>(base.device.get());
    }
    nvenc = nvenc_d3d.get();
    return true;
  }
  // Create the NvENC session and wire its input texture up as the conversion output.
  bool
  init_encoder(const ::video::config_t &client_config, const ::video::sunshine_colorspace_t &colorspace) override {
    if (!nvenc_d3d) {
      return false;
    }
    const auto nvenc_colorspace = nvenc::nvenc_colorspace_from_sunshine_colorspace(colorspace);
    if (!nvenc_d3d->create_encoder(config::video.nv, client_config, nvenc_colorspace, buffer_format)) {
      return false;
    }
    base.apply_colorspace(colorspace);
    return base.init_output(nvenc_d3d->get_input_texture(), client_config.width, client_config.height) == 0;
  }
  int
  convert(platf::img_t &img_base) override {
    return base.convert(img_base);
  }
private:
  d3d_base_encode_device base;
  std::unique_ptr<nvenc::nvenc_d3d11> nvenc_d3d;
  NV_ENC_BUFFER_FORMAT buffer_format = NV_ENC_BUFFER_FORMAT_UNDEFINED;
};
/**
 * @brief Upload a cursor shape image into a GPU texture and bind it to the cursor object.
 * @param device The D3D11 device to create the texture on.
 * @param cursor The cursor state to update.
 * @param cursor_img BGRA pixels of the cursor shape; empty when this cursor type is unused.
 * @param shape_info Shape metadata from Desktop Duplication (supplies the width).
 * @return `true` on success (including the empty-image case), `false` on failure.
 */
bool
set_cursor_texture(device_t::pointer device, gpu_cursor_t &cursor, util::buffer_t<std::uint8_t> &&cursor_img, DXGI_OUTDUPL_POINTER_SHAPE_INFO &shape_info) {
  // An empty buffer means this cursor image isn't used — unbind any previous texture
  if (cursor_img.size() == 0) {
    cursor.input_res.reset();
    cursor.set_texture(0, 0, nullptr);
    return true;
  }
  D3D11_SUBRESOURCE_DATA initial_data {
    std::begin(cursor_img),
    4 * shape_info.Width,
    0
  };
  // Immutable BGRA texture holding the cursor image; height is derived from the buffer size
  D3D11_TEXTURE2D_DESC desc {};
  desc.Width = shape_info.Width;
  desc.Height = cursor_img.size() / initial_data.SysMemPitch;
  desc.MipLevels = 1;
  desc.ArraySize = 1;
  desc.SampleDesc.Count = 1;
  desc.Usage = D3D11_USAGE_IMMUTABLE;
  desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
  desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
  texture2d_t texture;
  auto hr = device->CreateTexture2D(&desc, &initial_data, &texture);
  if (FAILED(hr)) {
    BOOST_LOG(error) << "Failed to create mouse texture [0x"sv << util::hex(hr).to_string_view() << ']';
    return false;
  }
  // Free resources before allocating on the next line.
  cursor.input_res.reset();
  hr = device->CreateShaderResourceView(texture.get(), nullptr, &cursor.input_res);
  if (FAILED(hr)) {
    BOOST_LOG(error) << "Failed to create cursor shader resource view [0x"sv << util::hex(hr).to_string_view() << ']';
    return false;
  }
  cursor.set_texture(desc.Width, desc.Height, std::move(texture));
  return true;
}
/**
 * @brief Capture the next frame via DXGI Desktop Duplication, blending the cursor when needed.
 * @param pull_free_image_cb call this to get a new free image from the video subsystem.
 * @param img_out the captured frame is returned here
 * @param timeout how long to wait for the next frame
 * @param cursor_visible whether the cursor should be blended into the captured frame
 * @return `capture_e::ok` on success, or another `capture_e` status (timeout/reinit/interrupted/error).
 */
capture_e
display_ddup_vram_t::snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) {
HRESULT status;
DXGI_OUTDUPL_FRAME_INFO frame_info;
resource_t::pointer res_p {};
auto capture_status = dup.next_frame(frame_info, timeout, &res_p);
resource_t res { res_p };
if (capture_status != capture_e::ok) {
return capture_status;
}
// A "frame" from Desktop Duplication may carry a desktop update, a cursor update, or both
const bool mouse_update_flag = frame_info.LastMouseUpdateTime.QuadPart != 0 || frame_info.PointerShapeBufferSize > 0;
const bool frame_update_flag = frame_info.LastPresentTime.QuadPart != 0;
const bool update_flag = mouse_update_flag || frame_update_flag;
if (!update_flag) {
return capture_e::timeout;
}
std::optional<std::chrono::steady_clock::time_point> frame_timestamp;
if (auto qpc_displayed = std::max(frame_info.LastPresentTime.QuadPart, frame_info.LastMouseUpdateTime.QuadPart)) {
// Translate QueryPerformanceCounter() value to steady_clock time point
frame_timestamp = std::chrono::steady_clock::now() - qpc_time_difference(qpc_counter(), qpc_displayed);
}
// A non-zero shape buffer means the cursor image itself changed; re-upload its textures
if (frame_info.PointerShapeBufferSize > 0) {
DXGI_OUTDUPL_POINTER_SHAPE_INFO shape_info {};
util::buffer_t<std::uint8_t> img_data { frame_info.PointerShapeBufferSize };
UINT dummy;
status = dup.dup->GetFramePointerShape(img_data.size(), std::begin(img_data), &dummy, &shape_info);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to get new pointer shape [0x"sv << util::hex(status).to_string_view() << ']';
return capture_e::error;
}
auto alpha_cursor_img = make_cursor_alpha_image(img_data, shape_info);
auto xor_cursor_img = make_cursor_xor_image(img_data, shape_info);
if (!set_cursor_texture(device.get(), cursor_alpha, std::move(alpha_cursor_img), shape_info) ||
!set_cursor_texture(device.get(), cursor_xor, std::move(xor_cursor_img), shape_info)) {
return capture_e::error;
}
}
// Cursor position/visibility changed
if (frame_info.LastMouseUpdateTime.QuadPart) {
cursor_alpha.set_pos(frame_info.PointerPosition.Position.x, frame_info.PointerPosition.Position.y,
width, height, display_rotation, frame_info.PointerPosition.Visible);
cursor_xor.set_pos(frame_info.PointerPosition.Position.x, frame_info.PointerPosition.Position.y,
width, height, display_rotation, frame_info.PointerPosition.Visible);
}
const bool blend_mouse_cursor_flag = (cursor_alpha.visible || cursor_xor.visible) && cursor_visible;
texture2d_t src {};
if (frame_update_flag) {
// Get the texture object from this frame
status = res->QueryInterface(IID_ID3D11Texture2D, (void **) &src);
if (FAILED(status)) {
BOOST_LOG(error) << "Couldn't query interface [0x"sv << util::hex(status).to_string_view() << ']';
return capture_e::error;
}
D3D11_TEXTURE2D_DESC desc;
src->GetDesc(&desc);
// It's possible for our display enumeration to race with mode changes and result in
// mismatched image pool and desktop texture sizes. If this happens, just reinit again.
if (desc.Width != width_before_rotation || desc.Height != height_before_rotation) {
BOOST_LOG(info) << "Capture size changed ["sv << width << 'x' << height << " -> "sv << desc.Width << 'x' << desc.Height << ']';
return capture_e::reinit;
}
// If we don't know the capture format yet, grab it from this texture
if (capture_format == DXGI_FORMAT_UNKNOWN) {
capture_format = desc.Format;
BOOST_LOG(info) << "Capture format ["sv << dxgi_format_to_string(capture_format) << ']';
}
// It's also possible for the capture format to change on the fly. If that happens,
// reinitialize capture to try format detection again and create new images.
if (capture_format != desc.Format) {
BOOST_LOG(info) << "Capture format changed ["sv << dxgi_format_to_string(capture_format) << " -> "sv << dxgi_format_to_string(desc.Format) << ']';
return capture_e::reinit;
}
}
// What to do with the saved last-frame state (last_frame_variant)
enum class lfa {
nothing,
replace_surface_with_img,
replace_img_with_surface,
copy_src_to_img,
copy_src_to_surface,
};
// How to produce the output image for this snapshot
enum class ofa {
forward_last_img,
copy_last_surface_and_blend_cursor,
dummy_fallback,
};
auto last_frame_action = lfa::nothing;
auto out_frame_action = ofa::dummy_fallback;
if (capture_format == DXGI_FORMAT_UNKNOWN) {
// We don't know the final capture format yet, so we will encode a black dummy image
last_frame_action = lfa::nothing;
out_frame_action = ofa::dummy_fallback;
}
else {
if (src) {
// We got a new frame from DesktopDuplication...
if (blend_mouse_cursor_flag) {
// ...and we need to blend the mouse cursor onto it.
// Copy the frame to intermediate surface so we can blend this and future mouse cursor updates
// without new frames from DesktopDuplication. We use direct3d surface directly here and not
// an image from pull_free_image_cb mainly because it's lighter (surface sharing between
// direct3d devices produce significant memory overhead).
last_frame_action = lfa::copy_src_to_surface;
// Copy the intermediate surface to a new image from pull_free_image_cb and blend the mouse cursor onto it.
out_frame_action = ofa::copy_last_surface_and_blend_cursor;
}
else {
// ...and we don't need to blend the mouse cursor.
// Copy the frame to a new image from pull_free_image_cb and save the shared pointer to the image
// in case the mouse cursor appears without a new frame from DesktopDuplication.
last_frame_action = lfa::copy_src_to_img;
// Use saved last image shared pointer as output image evading copy.
out_frame_action = ofa::forward_last_img;
}
}
else if (!std::holds_alternative<std::monostate>(last_frame_variant)) {
// We didn't get a new frame from DesktopDuplication...
if (blend_mouse_cursor_flag) {
// ...but we need to blend the mouse cursor.
if (std::holds_alternative<std::shared_ptr<platf::img_t>>(last_frame_variant)) {
// We have the shared pointer of the last image, replace it with intermediate surface
// while copying contents so we can blend this and future mouse cursor updates.
last_frame_action = lfa::replace_img_with_surface;
}
// Copy the intermediate surface which contains last DesktopDuplication frame
// to a new image from pull_free_image_cb and blend the mouse cursor onto it.
out_frame_action = ofa::copy_last_surface_and_blend_cursor;
}
else {
// ...and we don't need to blend the mouse cursor.
// This happens when the mouse cursor disappears from screen,
// or there's mouse cursor on screen, but its drawing is disabled in sunshine.
if (std::holds_alternative<texture2d_t>(last_frame_variant)) {
// We have the intermediate surface that was used as the mouse cursor blending base.
// Replace it with an image from pull_free_image_cb copying contents and freeing up the surface memory.
// Save the shared pointer to the image in case the mouse cursor reappears.
last_frame_action = lfa::replace_surface_with_img;
}
// Use saved last image shared pointer as output image evading copy.
out_frame_action = ofa::forward_last_img;
}
}
}
// Create (or recycle) the intermediate surface used as the cursor blending base
auto create_surface = [&](texture2d_t &surface) -> bool {
// Try to reuse the old surface if it hasn't been destroyed yet.
if (old_surface_delayed_destruction) {
surface.reset(old_surface_delayed_destruction.release());
return true;
}
// Otherwise create a new surface.
D3D11_TEXTURE2D_DESC t {};
t.Width = width_before_rotation;
t.Height = height_before_rotation;
t.MipLevels = 1;
t.ArraySize = 1;
t.SampleDesc.Count = 1;
t.Usage = D3D11_USAGE_DEFAULT;
t.Format = capture_format;
t.BindFlags = 0;
status = device->CreateTexture2D(&t, nullptr, &surface);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to create frame copy texture [0x"sv << util::hex(status).to_string_view() << ']';
return false;
}
return true;
};
// Complete an image (if needed) and acquire its keyed mutex for writing
auto get_locked_d3d_img = [&](std::shared_ptr<platf::img_t> &img, bool dummy = false) -> std::tuple<std::shared_ptr<img_d3d_t>, texture_lock_helper> {
auto d3d_img = std::static_pointer_cast<img_d3d_t>(img);
// Finish creating the image (if it hasn't happened already),
// also creates synchronization primitives for shared access from multiple direct3d devices.
if (complete_img(d3d_img.get(), dummy)) return { nullptr, nullptr };
// This image is shared between capture direct3d device and encoders direct3d devices,
// we must acquire lock before doing anything to it.
texture_lock_helper lock_helper(d3d_img->capture_mutex.get());
if (!lock_helper.lock()) {
BOOST_LOG(error) << "Failed to lock capture texture";
return { nullptr, nullptr };
}
// Clear the blank flag now that we're ready to capture into the image
d3d_img->blank = false;
return { std::move(d3d_img), std::move(lock_helper) };
};
switch (last_frame_action) {
case lfa::nothing: {
break;
}
case lfa::replace_surface_with_img: {
auto p_surface = std::get_if<texture2d_t>(&last_frame_variant);
if (!p_surface) {
BOOST_LOG(error) << "Logical error at " << __FILE__ << ":" << __LINE__;
return capture_e::error;
}
std::shared_ptr<platf::img_t> img;
if (!pull_free_image_cb(img)) return capture_e::interrupted;
auto [d3d_img, lock] = get_locked_d3d_img(img);
if (!d3d_img) return capture_e::error;
device_ctx->CopyResource(d3d_img->capture_texture.get(), p_surface->get());
// We delay the destruction of intermediate surface in case the mouse cursor reappears shortly.
old_surface_delayed_destruction.reset(p_surface->release());
old_surface_timestamp = std::chrono::steady_clock::now();
last_frame_variant = img;
break;
}
case lfa::replace_img_with_surface: {
auto p_img = std::get_if<std::shared_ptr<platf::img_t>>(&last_frame_variant);
if (!p_img) {
BOOST_LOG(error) << "Logical error at " << __FILE__ << ":" << __LINE__;
return capture_e::error;
}
auto [d3d_img, lock] = get_locked_d3d_img(*p_img);
if (!d3d_img) return capture_e::error;
// p_img dangles once last_frame_variant is reassigned below, so null it first
p_img = nullptr;
last_frame_variant = texture2d_t {};
auto &surface = std::get<texture2d_t>(last_frame_variant);
if (!create_surface(surface)) return capture_e::error;
device_ctx->CopyResource(surface.get(), d3d_img->capture_texture.get());
break;
}
case lfa::copy_src_to_img: {
last_frame_variant = {};
std::shared_ptr<platf::img_t> img;
if (!pull_free_image_cb(img)) return capture_e::interrupted;
auto [d3d_img, lock] = get_locked_d3d_img(img);
if (!d3d_img) return capture_e::error;
device_ctx->CopyResource(d3d_img->capture_texture.get(), src.get());
last_frame_variant = img;
break;
}
case lfa::copy_src_to_surface: {
auto p_surface = std::get_if<texture2d_t>(&last_frame_variant);
if (!p_surface) {
last_frame_variant = texture2d_t {};
p_surface = std::get_if<texture2d_t>(&last_frame_variant);
if (!create_surface(*p_surface)) return capture_e::error;
}
device_ctx->CopyResource(p_surface->get(), src.get());
break;
}
}
// Draw the alpha-blended and XOR cursor layers onto the image's render target
auto blend_cursor = [&](img_d3d_t &d3d_img) {
device_ctx->VSSetShader(cursor_vs.get(), nullptr, 0);
device_ctx->PSSetShader(cursor_ps.get(), nullptr, 0);
device_ctx->OMSetRenderTargets(1, &d3d_img.capture_rt, nullptr);
if (cursor_alpha.texture.get()) {
// Perform an alpha blending operation
device_ctx->OMSetBlendState(blend_alpha.get(), nullptr, 0xFFFFFFFFu);
device_ctx->PSSetShaderResources(0, 1, &cursor_alpha.input_res);
device_ctx->RSSetViewports(1, &cursor_alpha.cursor_view);
device_ctx->Draw(3, 0);
}
if (cursor_xor.texture.get()) {
// Perform an invert blending without touching alpha values
device_ctx->OMSetBlendState(blend_invert.get(), nullptr, 0x00FFFFFFu);
device_ctx->PSSetShaderResources(0, 1, &cursor_xor.input_res);
device_ctx->RSSetViewports(1, &cursor_xor.cursor_view);
device_ctx->Draw(3, 0);
}
// Unbind everything so the image can be used by other devices afterwards
device_ctx->OMSetBlendState(blend_disable.get(), nullptr, 0xFFFFFFFFu);
ID3D11RenderTargetView *emptyRenderTarget = nullptr;
device_ctx->OMSetRenderTargets(1, &emptyRenderTarget, nullptr);
device_ctx->RSSetViewports(0, nullptr);
ID3D11ShaderResourceView *emptyShaderResourceView = nullptr;
device_ctx->PSSetShaderResources(0, 1, &emptyShaderResourceView);
};
switch (out_frame_action) {
case ofa::forward_last_img: {
auto p_img = std::get_if<std::shared_ptr<platf::img_t>>(&last_frame_variant);
if (!p_img) {
BOOST_LOG(error) << "Logical error at " << __FILE__ << ":" << __LINE__;
return capture_e::error;
}
img_out = *p_img;
break;
}
case ofa::copy_last_surface_and_blend_cursor: {
auto p_surface = std::get_if<texture2d_t>(&last_frame_variant);
if (!p_surface) {
BOOST_LOG(error) << "Logical error at " << __FILE__ << ":" << __LINE__;
return capture_e::error;
}
if (!blend_mouse_cursor_flag) {
BOOST_LOG(error) << "Logical error at " << __FILE__ << ":" << __LINE__;
return capture_e::error;
}
if (!pull_free_image_cb(img_out)) return capture_e::interrupted;
auto [d3d_img, lock] = get_locked_d3d_img(img_out);
if (!d3d_img) return capture_e::error;
device_ctx->CopyResource(d3d_img->capture_texture.get(), p_surface->get());
blend_cursor(*d3d_img);
break;
}
case ofa::dummy_fallback: {
if (!pull_free_image_cb(img_out)) return capture_e::interrupted;
// Clear the image if it has been used as a dummy.
// It can have the mouse cursor blended onto it.
auto old_d3d_img = (img_d3d_t *) img_out.get();
bool reclear_dummy = !old_d3d_img->blank && old_d3d_img->capture_texture;
auto [d3d_img, lock] = get_locked_d3d_img(img_out, true);
if (!d3d_img) return capture_e::error;
if (reclear_dummy) {
const float rgb_black[] = { 0.0f, 0.0f, 0.0f, 0.0f };
device_ctx->ClearRenderTargetView(d3d_img->capture_rt.get(), rgb_black);
}
if (blend_mouse_cursor_flag) {
blend_cursor(*d3d_img);
}
break;
}
}
// Perform delayed destruction of the unused surface if the time is due.
if (old_surface_delayed_destruction && old_surface_timestamp + 10s < std::chrono::steady_clock::now()) {
old_surface_delayed_destruction.reset();
}
if (img_out) {
img_out->frame_timestamp = frame_timestamp;
}
return capture_e::ok;
}
// Release the current frame back to the Desktop Duplication interface.
capture_e
display_ddup_vram_t::release_snapshot() {
return dup.release_frame();
}
/**
 * @brief Initialize Desktop Duplication VRAM capture: the duplication session,
 *        cursor blending shaders, constant buffers and blend states.
 * @param config The video configuration.
 * @param display_name The name of the display to capture.
 * @return 0 on success, -1 on failure.
 */
int
display_ddup_vram_t::init(const ::video::config_t &config, const std::string &display_name) {
  if (display_base_t::init(config, display_name) || dup.init(this, config)) {
    return -1;
  }
  // Linear sampler used when drawing the cursor onto the captured frame
  D3D11_SAMPLER_DESC sampler_desc {};
  sampler_desc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
  sampler_desc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
  sampler_desc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
  sampler_desc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
  sampler_desc.ComparisonFunc = D3D11_COMPARISON_NEVER;
  sampler_desc.MinLOD = 0;
  sampler_desc.MaxLOD = D3D11_FLOAT32_MAX;
  // NOTE: use FAILED() for all HRESULT checks — positive success codes (e.g. S_FALSE)
  // are non-zero and must not be treated as errors.
  auto status = device->CreateSamplerState(&sampler_desc, &sampler_linear);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Failed to create linear sampler state [0x"sv << util::hex(status).to_string_view() << ']';
    return -1;
  }
  status = device->CreateVertexShader(cursor_vs_hlsl->GetBufferPointer(), cursor_vs_hlsl->GetBufferSize(), nullptr, &cursor_vs);
  if (FAILED(status)) {
    BOOST_LOG(error) << "Failed to create cursor vertex shader [0x"sv << util::hex(status).to_string_view() << ']';
    return -1;
  }
  {
    // Pass the display rotation to the vertex shader in a 16-byte-aligned constant buffer
    int32_t rotation_modifier = display_rotation == DXGI_MODE_ROTATION_UNSPECIFIED ? 0 : display_rotation - 1;
    int32_t rotation_data[16 / sizeof(int32_t)] { rotation_modifier };  // aligned to 16-byte
    auto rotation = make_buffer(device.get(), rotation_data);
    if (!rotation) {
      BOOST_LOG(error) << "Failed to create display rotation vertex constant buffer";
      return -1;
    }
    device_ctx->VSSetConstantBuffers(2, 1, &rotation);
  }
  if (config.dynamicRange && is_hdr()) {
    // This shader will normalize scRGB white levels to a user-defined white level
    status = device->CreatePixelShader(cursor_ps_normalize_white_hlsl->GetBufferPointer(), cursor_ps_normalize_white_hlsl->GetBufferSize(), nullptr, &cursor_ps);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Failed to create cursor blending (normalized white) pixel shader [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
    // Use a 300 nit target for the mouse cursor. We should really get
    // the user's SDR white level in nits, but there is no API that
    // provides that information to Win32 apps.
    float white_multiplier_data[16 / sizeof(float)] { 300.0f / 80.f };  // aligned to 16-byte
    auto white_multiplier = make_buffer(device.get(), white_multiplier_data);
    if (!white_multiplier) {
      BOOST_LOG(warning) << "Failed to create cursor blending (normalized white) white multiplier constant buffer";
      return -1;
    }
    device_ctx->PSSetConstantBuffers(1, 1, &white_multiplier);
  }
  else {
    status = device->CreatePixelShader(cursor_ps_hlsl->GetBufferPointer(), cursor_ps_hlsl->GetBufferSize(), nullptr, &cursor_ps);
    if (FAILED(status)) {
      BOOST_LOG(error) << "Failed to create cursor blending pixel shader [0x"sv << util::hex(status).to_string_view() << ']';
      return -1;
    }
  }
  // Blend states for alpha-blended and XOR (inverting) cursors, plus pass-through
  blend_alpha = make_blend(device.get(), true, false);
  blend_invert = make_blend(device.get(), true, true);
  blend_disable = make_blend(device.get(), false, false);
  if (!blend_disable || !blend_alpha || !blend_invert) {
    return -1;
  }
  device_ctx->OMSetBlendState(blend_disable.get(), nullptr, 0xFFFFFFFFu);
  device_ctx->PSSetSamplers(0, 1, &sampler_linear);
  device_ctx->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
  return 0;
}
/**
* Get the next frame from the Windows.Graphics.Capture API and copy it into a new snapshot texture.
* @param pull_free_image_cb call this to get a new free image from the video subsystem.
* @param img_out the captured frame is returned here
* @param timeout how long to wait for the next frame
* @param cursor_visible whether the mouse cursor should be included in the captured frame
* @return `capture_e::ok` on success, or another `capture_e` status describing the failure.
*/
capture_e
display_wgc_vram_t::snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) {
  texture2d_t src;
  uint64_t frame_qpc;
  dup.set_cursor_visible(cursor_visible);
  auto capture_status = dup.next_frame(timeout, &src, frame_qpc);
  if (capture_status != capture_e::ok) {
    return capture_status;
  }
  // Translate the frame's QPC value into a steady_clock time point
  auto frame_timestamp = std::chrono::steady_clock::now() - qpc_time_difference(qpc_counter(), frame_qpc);
  D3D11_TEXTURE2D_DESC desc;
  src->GetDesc(&desc);
  // It's possible for our display enumeration to race with mode changes and result in
  // mismatched image pool and desktop texture sizes. If this happens, just reinit again.
  if (desc.Width != width_before_rotation || desc.Height != height_before_rotation) {
    BOOST_LOG(info) << "Capture size changed ["sv << width << 'x' << height << " -> "sv << desc.Width << 'x' << desc.Height << ']';
    return capture_e::reinit;
  }
  // It's also possible for the capture format to change on the fly. If that happens,
  // reinitialize capture to try format detection again and create new images.
  if (capture_format != desc.Format) {
    BOOST_LOG(info) << "Capture format changed ["sv << dxgi_format_to_string(capture_format) << " -> "sv << dxgi_format_to_string(desc.Format) << ']';
    return capture_e::reinit;
  }
  std::shared_ptr<platf::img_t> img;
  if (!pull_free_image_cb(img)) {
    return capture_e::interrupted;
  }
  auto d3d_img = std::static_pointer_cast<img_d3d_t>(img);
  d3d_img->blank = false;  // image is always ready for capture
  if (complete_img(d3d_img.get(), false) != 0) {
    return capture_e::error;
  }
  // The image is shared with the encoder devices — take the keyed mutex before writing
  texture_lock_helper lock_helper(d3d_img->capture_mutex.get());
  if (!lock_helper.lock()) {
    BOOST_LOG(error) << "Failed to lock capture texture";
    return capture_e::error;
  }
  device_ctx->CopyResource(d3d_img->capture_texture.get(), src.get());
  img_out = img;
  if (img_out) {
    img_out->frame_timestamp = frame_timestamp;
  }
  return capture_e::ok;
}
// Hand the current frame back to the WGC capture helper.
capture_e
display_wgc_vram_t::release_snapshot() {
return dup.release_frame();
}
// Initialize the base display first, then the Windows.Graphics.Capture session.
int
display_wgc_vram_t::init(const ::video::config_t &config, const std::string &display_name) {
  if (display_base_t::init(config, display_name)) {
    return -1;
  }
  if (dup.init(this, config)) {
    return -1;
  }
  return 0;
}
// Allocate a blank image shell; the format-dependent parts are filled in later
// by complete_img() once the capture format is known.
std::shared_ptr<platf::img_t>
display_vram_t::alloc_img() {
  auto img = std::make_shared<img_d3d_t>();
  img->id = next_image_id++;
  img->width = width_before_rotation;
  img->height = height_before_rotation;
  img->blank = true;
  return img;
}
// This cannot use ID3D11DeviceContext because it can be called concurrently by the encoding thread
int
display_vram_t::complete_img(platf::img_t *img_base, bool dummy) {
  auto img = (img_d3d_t *) img_base;
  // Already fully created with the same dummy state? Nothing to do.
  if (img->capture_texture && img->dummy == dummy) {
    return 0;
  }
  // A real (non-dummy) image requires the capture format to be known by now
  if (!dummy && capture_format == DXGI_FORMAT_UNKNOWN) {
    BOOST_LOG(error) << "display_vram_t::complete_img() called with unknown capture format!";
    return -1;
  }
  // Tear down any previous resources (e.g. when a dummy image becomes a real one)
  img->capture_texture.reset();
  img->capture_rt.reset();
  img->capture_mutex.reset();
  img->data = nullptr;
  if (img->encoder_texture_handle) {
    CloseHandle(img->encoder_texture_handle);
    img->encoder_texture_handle = NULL;
  }
  // Fill in the format-dependent fields
  img->pixel_pitch = get_pixel_pitch();
  img->row_pitch = img->pixel_pitch * img->width;
  img->dummy = dummy;
  img->format = (capture_format == DXGI_FORMAT_UNKNOWN) ? DXGI_FORMAT_B8G8R8A8_UNORM : capture_format;
  // Shared, keyed-mutex-protected texture usable as both SRV and render target
  D3D11_TEXTURE2D_DESC tex_desc {};
  tex_desc.Width = img->width;
  tex_desc.Height = img->height;
  tex_desc.MipLevels = 1;
  tex_desc.ArraySize = 1;
  tex_desc.SampleDesc.Count = 1;
  tex_desc.Usage = D3D11_USAGE_DEFAULT;
  tex_desc.Format = img->format;
  tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
  tex_desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
  auto hr = device->CreateTexture2D(&tex_desc, nullptr, &img->capture_texture);
  if (FAILED(hr)) {
    BOOST_LOG(error) << "Failed to create img buf texture [0x"sv << util::hex(hr).to_string_view() << ']';
    return -1;
  }
  hr = device->CreateRenderTargetView(img->capture_texture.get(), nullptr, &img->capture_rt);
  if (FAILED(hr)) {
    BOOST_LOG(error) << "Failed to create render target view [0x"sv << util::hex(hr).to_string_view() << ']';
    return -1;
  }
  // Keyed mutex used to synchronize access with the encoder devices
  hr = img->capture_texture->QueryInterface(__uuidof(IDXGIKeyedMutex), (void **) &img->capture_mutex);
  if (FAILED(hr)) {
    BOOST_LOG(error) << "Failed to query IDXGIKeyedMutex [0x"sv << util::hex(hr).to_string_view() << ']';
    return -1;
  }
  resource1_t resource;
  hr = img->capture_texture->QueryInterface(__uuidof(IDXGIResource1), (void **) &resource);
  if (FAILED(hr)) {
    BOOST_LOG(error) << "Failed to query IDXGIResource1 [0x"sv << util::hex(hr).to_string_view() << ']';
    return -1;
  }
  // NT handle the encoder devices use to open this texture for reading
  hr = resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr, &img->encoder_texture_handle);
  if (FAILED(hr)) {
    BOOST_LOG(error) << "Failed to create shared texture handle [0x"sv << util::hex(hr).to_string_view() << ']';
    return -1;
  }
  img->data = (std::uint8_t *) img->capture_texture.get();
  return 0;
}
// This cannot use ID3D11DeviceContext because it can be called concurrently by the encoding thread
/**
 * @brief Complete an image as a dummy placeholder (used before the capture format is known).
 * @memberof platf::dxgi::display_vram_t
 * @param img_base The image to complete.
 * @return 0 on success, -1 on failure.
 */
int
display_vram_t::dummy_img(platf::img_t *img_base) {
  return complete_img(img_base, true);
}
std::vector<DXGI_FORMAT>
display_vram_t::get_supported_capture_formats() {
return {
// scRGB FP16 is the ideal format for Wide Color Gamut and Advanced Color
// displays (both SDR and HDR). This format uses linear gamma, so we will
// use a linear->PQ shader for HDR and a linear->sRGB shader for SDR.
DXGI_FORMAT_R16G16B16A16_FLOAT,
// DXGI_FORMAT_R10G10B10A2_UNORM seems like it might give us frames already
// converted to SMPTE 2084 PQ, however it seems to actually just clamp the
// scRGB FP16 values that DWM is using when the desktop format is scRGB FP16.
//
// If there is a case where the desktop format is really SMPTE 2084 PQ, it
// might make sense to support capturing it without conversion to scRGB,
// but we avoid it for now.
// We include the 8-bit modes too for when the display is in SDR mode,
// while the client stream is HDR-capable. These UNORM formats can
// use our normal pixel shaders that expect sRGB input.
DXGI_FORMAT_B8G8R8A8_UNORM,
DXGI_FORMAT_B8G8R8X8_UNORM,
DXGI_FORMAT_R8G8B8A8_UNORM,
};
}
/**
 * @brief Check that a given codec is supported by the display device.
 * @param name The FFmpeg codec name (or similar for non-FFmpeg codecs).
 * @param config The codec configuration.
 * @return `true` if supported, `false` otherwise.
 *
 * Matches the codec's vendor suffix (_amf/_qsv/_nvenc) against the adapter's
 * PCI vendor ID, then applies vendor-specific capability checks.
 */
bool
display_vram_t::is_codec_supported(std::string_view name, const ::video::config_t &config) {
  DXGI_ADAPTER_DESC adapter_desc;
  adapter->GetDesc(&adapter_desc);
  if (adapter_desc.VendorId == 0x1002) {  // AMD
    // If it's not an AMF encoder, it's not compatible with an AMD GPU
    if (!boost::algorithm::ends_with(name, "_amf")) {
      return false;
    }
    // Perform AMF version checks if we're using an AMD GPU. This check is placed in display_vram_t
    // to avoid hitting the display_ram_t path which uses software encoding and doesn't touch AMF.
    HMODULE amfrt = LoadLibraryW(AMF_DLL_NAME);
    if (amfrt) {
      // Ensure the runtime DLL is released on every exit path
      auto unload_amfrt = util::fail_guard([amfrt]() {
        FreeLibrary(amfrt);
      });
      auto fnAMFQueryVersion = (AMFQueryVersion_Fn) GetProcAddress(amfrt, AMF_QUERY_VERSION_FUNCTION_NAME);
      if (fnAMFQueryVersion) {
        amf_uint64 version;
        auto result = fnAMFQueryVersion(&version);
        if (result == AMF_OK) {
          // videoFormat 2 == AV1
          if (config.videoFormat == 2 && version < AMF_MAKE_FULL_VERSION(1, 4, 30, 0)) {
            // AMF 1.4.30 adds ultra low latency mode for AV1. Don't use AV1 on earlier versions.
            // This corresponds to driver version 23.5.2 (23.10.01.45) or newer.
            BOOST_LOG(warning) << "AV1 encoding is disabled on AMF version "sv
                               << AMF_GET_MAJOR_VERSION(version) << '.'
                               << AMF_GET_MINOR_VERSION(version) << '.'
                               << AMF_GET_SUBMINOR_VERSION(version) << '.'
                               << AMF_GET_BUILD_VERSION(version);
            BOOST_LOG(warning) << "If your AMD GPU supports AV1 encoding, update your graphics drivers!"sv;
            return false;
          }
          else if (config.dynamicRange && version < AMF_MAKE_FULL_VERSION(1, 4, 23, 0)) {
            // Older versions of the AMD AMF runtime can crash when fed P010 surfaces.
            // Fail if AMF version is below 1.4.23 where HEVC Main10 encoding was introduced.
            // AMF 1.4.23 corresponds to driver version 21.12.1 (21.40.11.03) or newer.
            BOOST_LOG(warning) << "HDR encoding is disabled on AMF version "sv
                               << AMF_GET_MAJOR_VERSION(version) << '.'
                               << AMF_GET_MINOR_VERSION(version) << '.'
                               << AMF_GET_SUBMINOR_VERSION(version) << '.'
                               << AMF_GET_BUILD_VERSION(version);
            BOOST_LOG(warning) << "If your AMD GPU supports HEVC Main10 encoding, update your graphics drivers!"sv;
            return false;
          }
        }
        else {
          // Version query failed; don't block the codec on that alone
          BOOST_LOG(warning) << "AMFQueryVersion() failed: "sv << result;
        }
      }
      else {
        BOOST_LOG(warning) << "AMF DLL missing export: "sv << AMF_QUERY_VERSION_FUNCTION_NAME;
      }
    }
    else {
      BOOST_LOG(warning) << "Detected AMD GPU but AMF failed to load"sv;
    }
  }
  else if (adapter_desc.VendorId == 0x8086) {  // Intel
    // If it's not a QSV encoder, it's not compatible with an Intel GPU
    if (!boost::algorithm::ends_with(name, "_qsv")) {
      return false;
    }
    if (config.chromaSamplingType == 1) {
      if (config.videoFormat == 0 || config.videoFormat == 2) {
        // QSV doesn't support 4:4:4 in H.264 or AV1
        return false;
      }
      // TODO: Blacklist HEVC 4:4:4 based on adapter model
    }
  }
  else if (adapter_desc.VendorId == 0x10de) {  // Nvidia
    // If it's not an NVENC encoder, it's not compatible with an Nvidia GPU
    if (!boost::algorithm::ends_with(name, "_nvenc")) {
      return false;
    }
  }
  else {
    // Unknown vendor: be permissive and let the encoder probe decide
    BOOST_LOG(warning) << "Unknown GPU vendor ID: " << util::hex(adapter_desc.VendorId).to_string_view();
  }
  return true;
}
/**
 * @brief Create an avcodec (FFmpeg) encoding device bound to this display's adapter.
 * @param pix_fmt Pixel format the encoder expects.
 * @return The device, or nullptr on initialization failure.
 */
std::unique_ptr<avcodec_encode_device_t>
display_vram_t::make_avcodec_encode_device(pix_fmt_e pix_fmt) {
  auto encode_device = std::make_unique<d3d_avcodec_encode_device_t>();
  const bool initialized = encode_device->init(shared_from_this(), adapter.get(), pix_fmt) == 0;
  return initialized ? std::move(encode_device) : nullptr;
}
/**
 * @brief Create an NVENC encoding device bound to this display's adapter.
 * @param pix_fmt Pixel format the encoder expects.
 * @return The device, or nullptr on initialization failure.
 */
std::unique_ptr<nvenc_encode_device_t>
display_vram_t::make_nvenc_encode_device(pix_fmt_e pix_fmt) {
  auto encode_device = std::make_unique<d3d_nvenc_encode_device_t>();
  const bool initialized = encode_device->init_device(shared_from_this(), adapter.get(), pix_fmt);
  return initialized ? std::move(encode_device) : nullptr;
}
/**
 * @brief Compile every HLSL shader used by the D3D11 capture/convert pipeline.
 * @return 0 on success, -1 if any shader fails to compile.
 *
 * Each helper macro compiles `<name>.hlsl` from SUNSHINE_SHADERS_DIR into the
 * identically named `<name>_hlsl` global blob via token pasting, failing fast
 * on the first compilation error.
 */
int
init() {
  BOOST_LOG(info) << "Compiling shaders..."sv;
#define compile_vertex_shader_helper(x) \
  if (!(x##_hlsl = compile_vertex_shader(SUNSHINE_SHADERS_DIR "/" #x ".hlsl"))) return -1;
#define compile_pixel_shader_helper(x) \
  if (!(x##_hlsl = compile_pixel_shader(SUNSHINE_SHADERS_DIR "/" #x ".hlsl"))) return -1;
  // 4:2:0 chroma (packed UV) variants: plain, linear (scRGB SDR), and PQ (HDR)
  compile_pixel_shader_helper(convert_yuv420_packed_uv_type0_ps);
  compile_pixel_shader_helper(convert_yuv420_packed_uv_type0_ps_linear);
  compile_pixel_shader_helper(convert_yuv420_packed_uv_type0_ps_perceptual_quantizer);
  compile_vertex_shader_helper(convert_yuv420_packed_uv_type0_vs);
  compile_pixel_shader_helper(convert_yuv420_packed_uv_type0s_ps);
  compile_pixel_shader_helper(convert_yuv420_packed_uv_type0s_ps_linear);
  compile_pixel_shader_helper(convert_yuv420_packed_uv_type0s_ps_perceptual_quantizer);
  compile_vertex_shader_helper(convert_yuv420_packed_uv_type0s_vs);
  // 4:2:0 luma plane variants
  compile_pixel_shader_helper(convert_yuv420_planar_y_ps);
  compile_pixel_shader_helper(convert_yuv420_planar_y_ps_linear);
  compile_pixel_shader_helper(convert_yuv420_planar_y_ps_perceptual_quantizer);
  compile_vertex_shader_helper(convert_yuv420_planar_y_vs);
  // 4:4:4 variants (AYUV, planar, Y410)
  compile_pixel_shader_helper(convert_yuv444_packed_ayuv_ps);
  compile_pixel_shader_helper(convert_yuv444_packed_ayuv_ps_linear);
  compile_vertex_shader_helper(convert_yuv444_packed_vs);
  compile_pixel_shader_helper(convert_yuv444_planar_ps);
  compile_pixel_shader_helper(convert_yuv444_planar_ps_linear);
  compile_pixel_shader_helper(convert_yuv444_planar_ps_perceptual_quantizer);
  compile_pixel_shader_helper(convert_yuv444_packed_y410_ps);
  compile_pixel_shader_helper(convert_yuv444_packed_y410_ps_linear);
  compile_pixel_shader_helper(convert_yuv444_packed_y410_ps_perceptual_quantizer);
  compile_vertex_shader_helper(convert_yuv444_planar_vs);
  // Cursor blending shaders
  compile_pixel_shader_helper(cursor_ps);
  compile_pixel_shader_helper(cursor_ps_normalize_white);
  compile_vertex_shader_helper(cursor_vs);
  BOOST_LOG(info) << "Compiled shaders"sv;
#undef compile_vertex_shader_helper
#undef compile_pixel_shader_helper
  return 0;
}
} // namespace platf::dxgi
| 75,856
|
C++
|
.cpp
| 1,665
| 38.086486
| 177
| 0.634967
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,021
|
driver_settings.cpp
|
LizardByte_Sunshine/src/platform/windows/nvprefs/driver_settings.cpp
|
/**
* @file src/platform/windows/nvprefs/driver_settings.cpp
* @brief Definitions for nvidia driver settings.
*/
// local includes
#include "driver_settings.h"
#include "nvprefs_common.h"
namespace {

  // Name of the driver application profile created for Sunshine's streamer
  const auto sunshine_application_profile_name = L"SunshineStream";
  // Executable name registered under that profile
  const auto sunshine_application_path = L"sunshine.exe";

  // Log a human-readable description of an NvAPI failure code.
  void
  nvapi_error_message(NvAPI_Status status) {
    NvAPI_ShortString message = {};
    NvAPI_GetErrorMessage(status, message);
    nvprefs::error_message(std::string("NvAPI error: ") + message);
  }

  // Copy a wide string (including its NUL terminator) into an
  // NvAPI_UnicodeString, which is an NvU16 array of NVAPI_UNICODE_STRING_MAX
  // elements; the static_assert guarantees the element sizes match.
  void
  fill_nvapi_string(NvAPI_UnicodeString &dest, const wchar_t *src) {
    static_assert(sizeof(NvU16) == sizeof(wchar_t));
    memcpy_s(dest, NVAPI_UNICODE_STRING_MAX * sizeof(NvU16), src, (wcslen(src) + 1) * sizeof(wchar_t));
  }

}  // namespace
namespace nvprefs {
// Tear down the DRS session, if init() ever created one.
driver_settings_t::~driver_settings_t() {
  if (!session_handle) {
    return;
  }
  NvAPI_DRS_DestroySession(session_handle);
}
/**
 * @brief Initialize nvapi, create a DRS session, and load driver settings.
 * @return `true` when settings are loaded and ready.
 */
bool
driver_settings_t::init() {
  // Idempotent: an existing session means we're already initialized.
  if (session_handle) {
    return true;
  }
  if (NvAPI_Initialize() != NVAPI_OK) {
    info_message("NvAPI_Initialize() failed, ignore if you don't have NVIDIA video card");
    return false;
  }
  const auto status = NvAPI_DRS_CreateSession(&session_handle);
  if (status != NVAPI_OK) {
    nvapi_error_message(status);
    error_message("NvAPI_DRS_CreateSession() failed");
    return false;
  }
  return load_settings();
}
// Release the DRS session (if any), then unload the nvapi library itself.
void
driver_settings_t::destroy() {
  if (session_handle != 0) {
    NvAPI_DRS_DestroySession(session_handle);
    session_handle = 0;
  }
  NvAPI_Unload();
}
bool
driver_settings_t::load_settings() {
if (!session_handle) return false;
NvAPI_Status status = NvAPI_DRS_LoadSettings(session_handle);
if (status != NVAPI_OK) {
nvapi_error_message(status);
error_message("NvAPI_DRS_LoadSettings() failed");
destroy();
return false;
}
return true;
}
bool
driver_settings_t::save_settings() {
if (!session_handle) return false;
NvAPI_Status status = NvAPI_DRS_SaveSettings(session_handle);
if (status != NVAPI_OK) {
nvapi_error_message(status);
error_message("NvAPI_DRS_SaveSettings() failed");
return false;
}
return true;
}
/**
 * @brief Revert the base (global) profile's OpenGL/Vulkan-on-DXGI setting using saved undo data.
 * @param undo_data Values previously captured by check_and_modify_global_profile().
 * @return `true` on success or when nothing needed restoring, `false` on NvAPI failure.
 */
bool
driver_settings_t::restore_global_profile_to_undo(const undo_data_t &undo_data) {
  if (!session_handle) return false;
  const auto &swapchain_data = undo_data.get_opengl_swapchain();
  if (swapchain_data) {
    NvAPI_Status status;
    NvDRSProfileHandle profile_handle = 0;
    status = NvAPI_DRS_GetBaseProfile(session_handle, &profile_handle);
    if (status != NVAPI_OK) {
      nvapi_error_message(status);
      error_message("NvAPI_DRS_GetBaseProfile() failed");
      return false;
    }
    NVDRS_SETTING setting = {};
    setting.version = NVDRS_SETTING_VER;
    status = NvAPI_DRS_GetSetting(session_handle, profile_handle, OGL_CPL_PREFER_DXPRESENT_ID, &setting);
    // Only restore if the value is still exactly the one we wrote; if the user
    // (or another tool) changed it since, we must not clobber their choice.
    if (status == NVAPI_OK && setting.settingLocation == NVDRS_CURRENT_PROFILE_LOCATION && setting.u32CurrentValue == swapchain_data->our_value) {
      if (swapchain_data->undo_value) {
        // There was an explicit previous value - write it back.
        setting = {};
        setting.version = NVDRS_SETTING_VER1;
        setting.settingId = OGL_CPL_PREFER_DXPRESENT_ID;
        setting.settingType = NVDRS_DWORD_TYPE;
        setting.settingLocation = NVDRS_CURRENT_PROFILE_LOCATION;
        setting.u32CurrentValue = *swapchain_data->undo_value;
        status = NvAPI_DRS_SetSetting(session_handle, profile_handle, &setting);
        if (status != NVAPI_OK) {
          nvapi_error_message(status);
          error_message("NvAPI_DRS_SetSetting() OGL_CPL_PREFER_DXPRESENT failed");
          return false;
        }
      }
      else {
        // The setting didn't exist before we touched it - delete it again.
        // NVAPI_SETTING_NOT_FOUND is fine here (already gone).
        status = NvAPI_DRS_DeleteProfileSetting(session_handle, profile_handle, OGL_CPL_PREFER_DXPRESENT_ID);
        if (status != NVAPI_OK && status != NVAPI_SETTING_NOT_FOUND) {
          nvapi_error_message(status);
          error_message("NvAPI_DRS_DeleteProfileSetting() OGL_CPL_PREFER_DXPRESENT failed");
          return false;
        }
      }
      info_message("Restored OGL_CPL_PREFER_DXPRESENT for base profile");
    }
    else if (status == NVAPI_OK || status == NVAPI_SETTING_NOT_FOUND) {
      // Setting changed (or removed) externally - leave it alone.
      info_message("OGL_CPL_PREFER_DXPRESENT has been changed from our value in base profile, not restoring");
    }
    else {
      error_message("NvAPI_DRS_GetSetting() OGL_CPL_PREFER_DXPRESENT failed");
      return false;
    }
  }
  return true;
}
/**
 * @brief Enable OpenGL/Vulkan-on-DXGI present in the base profile if the user opted in.
 * @param undo_data Receives the original value when a modification was made, empty otherwise.
 * @return `true` on success (including the no-op cases).
 * @note Modifies but does NOT save the settings; the caller commits via save_settings().
 */
bool
driver_settings_t::check_and_modify_global_profile(std::optional<undo_data_t> &undo_data) {
  if (!session_handle) return false;
  undo_data.reset();
  NvAPI_Status status;
  if (!get_nvprefs_options().opengl_vulkan_on_dxgi) {
    // User requested to leave OpenGL/Vulkan DXGI swapchain setting alone
    return true;
  }
  NvDRSProfileHandle profile_handle = 0;
  status = NvAPI_DRS_GetBaseProfile(session_handle, &profile_handle);
  if (status != NVAPI_OK) {
    nvapi_error_message(status);
    error_message("NvAPI_DRS_GetBaseProfile() failed");
    return false;
  }
  NVDRS_SETTING setting = {};
  setting.version = NVDRS_SETTING_VER;
  status = NvAPI_DRS_GetSetting(session_handle, profile_handle, OGL_CPL_PREFER_DXPRESENT_ID, &setting);
  // Remember current OpenGL/Vulkan DXGI swapchain setting and change it if needed
  if (status == NVAPI_SETTING_NOT_FOUND || (status == NVAPI_OK && setting.u32CurrentValue != OGL_CPL_PREFER_DXPRESENT_PREFER_ENABLED)) {
    undo_data = undo_data_t();
    if (status == NVAPI_OK) {
      // Existing value recorded so it can be restored later
      undo_data->set_opengl_swapchain(OGL_CPL_PREFER_DXPRESENT_PREFER_ENABLED, setting.u32CurrentValue);
    }
    else {
      // No previous value - undo means deleting the setting again
      undo_data->set_opengl_swapchain(OGL_CPL_PREFER_DXPRESENT_PREFER_ENABLED, std::nullopt);
    }
    setting = {};
    setting.version = NVDRS_SETTING_VER1;
    setting.settingId = OGL_CPL_PREFER_DXPRESENT_ID;
    setting.settingType = NVDRS_DWORD_TYPE;
    setting.settingLocation = NVDRS_CURRENT_PROFILE_LOCATION;
    setting.u32CurrentValue = OGL_CPL_PREFER_DXPRESENT_PREFER_ENABLED;
    status = NvAPI_DRS_SetSetting(session_handle, profile_handle, &setting);
    if (status != NVAPI_OK) {
      nvapi_error_message(status);
      error_message("NvAPI_DRS_SetSetting() OGL_CPL_PREFER_DXPRESENT failed");
      return false;
    }
    info_message("Changed OGL_CPL_PREFER_DXPRESENT to OGL_CPL_PREFER_DXPRESENT_PREFER_ENABLED for base profile");
  }
  else if (status != NVAPI_OK) {
    nvapi_error_message(status);
    error_message("NvAPI_DRS_GetSetting() OGL_CPL_PREFER_DXPRESENT failed");
    return false;
  }
  return true;
}
/**
 * @brief Ensure the SunshineStream application profile exists and reflects the user's power-mode preference.
 * @param modified Set to `true` when any profile/application/setting change was made.
 * @return `true` on success.
 * @note Modifies but does NOT save the settings; the caller commits via save_settings().
 */
bool
driver_settings_t::check_and_modify_application_profile(bool &modified) {
  if (!session_handle) return false;
  modified = false;
  NvAPI_Status status;
  NvAPI_UnicodeString profile_name = {};
  fill_nvapi_string(profile_name, sunshine_application_profile_name);
  NvDRSProfileHandle profile_handle = 0;
  status = NvAPI_DRS_FindProfileByName(session_handle, profile_name, &profile_handle);
  if (status != NVAPI_OK) {
    // Create application profile if missing
    NVDRS_PROFILE profile = {};
    profile.version = NVDRS_PROFILE_VER1;
    fill_nvapi_string(profile.profileName, sunshine_application_profile_name);
    status = NvAPI_DRS_CreateProfile(session_handle, &profile, &profile_handle);
    if (status != NVAPI_OK) {
      nvapi_error_message(status);
      error_message("NvAPI_DRS_CreateProfile() failed");
      return false;
    }
    modified = true;
  }
  NvAPI_UnicodeString sunshine_path = {};
  fill_nvapi_string(sunshine_path, sunshine_application_path);
  NVDRS_APPLICATION application = {};
  application.version = NVDRS_APPLICATION_VER_V1;
  status = NvAPI_DRS_GetApplicationInfo(session_handle, profile_handle, sunshine_path, &application);
  if (status != NVAPI_OK) {
    // Add application to application profile if missing
    application.version = NVDRS_APPLICATION_VER_V1;
    application.isPredefined = 0;
    fill_nvapi_string(application.appName, sunshine_application_path);
    fill_nvapi_string(application.userFriendlyName, sunshine_application_path);
    fill_nvapi_string(application.launcher, L"");
    status = NvAPI_DRS_CreateApplication(session_handle, profile_handle, &application);
    if (status != NVAPI_OK) {
      nvapi_error_message(status);
      error_message("NvAPI_DRS_CreateApplication() failed");
      return false;
    }
    modified = true;
  }
  NVDRS_SETTING setting = {};
  setting.version = NVDRS_SETTING_VER1;
  status = NvAPI_DRS_GetSetting(session_handle, profile_handle, PREFERRED_PSTATE_ID, &setting);
  if (!get_nvprefs_options().sunshine_high_power_mode) {
    if (status == NVAPI_OK &&
        setting.settingLocation == NVDRS_CURRENT_PROFILE_LOCATION) {
      // User requested to not use high power mode for sunshine.exe,
      // remove the setting from application profile if it's been set previously
      status = NvAPI_DRS_DeleteProfileSetting(session_handle, profile_handle, PREFERRED_PSTATE_ID);
      if (status != NVAPI_OK && status != NVAPI_SETTING_NOT_FOUND) {
        nvapi_error_message(status);
        error_message("NvAPI_DRS_DeleteProfileSetting() PREFERRED_PSTATE failed");
        return false;
      }
      modified = true;
      info_message(std::wstring(L"Removed PREFERRED_PSTATE for ") + sunshine_application_path);
    }
  }
  else if (status != NVAPI_OK ||
           setting.settingLocation != NVDRS_CURRENT_PROFILE_LOCATION ||
           setting.u32CurrentValue != PREFERRED_PSTATE_PREFER_MAX) {
    // Set power setting if needed
    setting = {};
    setting.version = NVDRS_SETTING_VER1;
    setting.settingId = PREFERRED_PSTATE_ID;
    setting.settingType = NVDRS_DWORD_TYPE;
    setting.settingLocation = NVDRS_CURRENT_PROFILE_LOCATION;
    setting.u32CurrentValue = PREFERRED_PSTATE_PREFER_MAX;
    status = NvAPI_DRS_SetSetting(session_handle, profile_handle, &setting);
    if (status != NVAPI_OK) {
      nvapi_error_message(status);
      error_message("NvAPI_DRS_SetSetting() PREFERRED_PSTATE failed");
      return false;
    }
    modified = true;
    info_message(std::wstring(L"Changed PREFERRED_PSTATE to PREFERRED_PSTATE_PREFER_MAX for ") + sunshine_application_path);
  }
  return true;
}
} // namespace nvprefs
| 10,645
|
C++
|
.cpp
| 258
| 34.666667
| 148
| 0.67583
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,022
|
nvapi_opensource_wrapper.cpp
|
LizardByte_Sunshine/src/platform/windows/nvprefs/nvapi_opensource_wrapper.cpp
|
/**
* @file src/platform/windows/nvprefs/nvapi_opensource_wrapper.cpp
* @brief Definitions for the NVAPI wrapper.
*/
// standard library headers
#include <map>
#include <string>
// local includes
#include "driver_settings.h"
#include "nvprefs_common.h"
// special nvapi header that should be the last include
#include <nvapi_interface.h>
namespace {

  // nvapi entry points resolved via nvapi_QueryInterface, keyed by interface name.
  // Keyed by std::string (character comparison) rather than const char * - a
  // pointer-keyed map only works when the compiler merges identical string
  // literals between the interface table and the call sites, which the
  // standard does not guarantee.
  std::map<std::string, void *> interfaces;

  HMODULE dll = NULL;

  // Invoke the named nvapi entry point with the given arguments.
  // Returns NVAPI_API_NOT_INITIALIZED before NvAPI_Initialize() has populated
  // the table, and NVAPI_NOT_SUPPORTED when the driver doesn't export the
  // requested interface. Lookup is read-only (no accidental map insertion).
  template <typename Func, typename... Args>
  NvAPI_Status
  call_interface(const char *name, Args... args) {
    if (interfaces.empty()) {
      return NVAPI_API_NOT_INITIALIZED;
    }
    auto it = interfaces.find(name);
    auto func = (it != interfaces.end()) ? (Func *) it->second : nullptr;
    if (!func) {
      return NVAPI_NOT_SUPPORTED;
    }
    return func(args...);
  }

}  // namespace
#undef NVAPI_INTERFACE
#define NVAPI_INTERFACE NvAPI_Status __cdecl
extern void *__cdecl nvapi_QueryInterface(NvU32 id);
/**
 * @brief Load the vendor nvapi DLL and resolve all known interfaces.
 * @return NVAPI_OK on success (or if already loaded), NVAPI_LIBRARY_NOT_FOUND otherwise.
 */
NVAPI_INTERFACE
NvAPI_Initialize() {
  // Idempotent: repeat calls after a successful load are a no-op.
  if (dll) return NVAPI_OK;
#ifdef _WIN64
  auto dll_name = "nvapi64.dll";
#else
  auto dll_name = "nvapi.dll";
#endif
  // Restrict the search to system32 to avoid DLL-preloading attacks.
  if ((dll = LoadLibraryEx(dll_name, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32))) {
    if (auto query_interface = (decltype(nvapi_QueryInterface) *) GetProcAddress(dll, "nvapi_QueryInterface")) {
      // Resolve every interface listed in the table up front;
      // unsupported ones are stored as nullptr.
      for (const auto &item : nvapi_interface_table) {
        interfaces[item.func] = query_interface(item.id);
      }
      return NVAPI_OK;
    }
  }
  // Roll back any partial initialization before reporting failure.
  NvAPI_Unload();
  return NVAPI_LIBRARY_NOT_FOUND;
}
/**
 * @brief Drop all resolved interfaces and unload the nvapi DLL.
 * @return Always NVAPI_OK; safe to call repeatedly.
 */
NVAPI_INTERFACE
NvAPI_Unload() {
  if (dll != NULL) {
    interfaces.clear();
    FreeLibrary(dll);
    dll = NULL;
  }
  return NVAPI_OK;
}
// Thin forwarders: each public NvAPI function simply dispatches to the
// dynamically resolved interface of the same name via call_interface().
NVAPI_INTERFACE
NvAPI_GetErrorMessage(NvAPI_Status nr, NvAPI_ShortString szDesc) {
  return call_interface<decltype(NvAPI_GetErrorMessage)>("NvAPI_GetErrorMessage", nr, szDesc);
}

// This is only a subset of NvAPI_DRS_* functions, more can be added if needed
NVAPI_INTERFACE
NvAPI_DRS_CreateSession(NvDRSSessionHandle *phSession) {
  return call_interface<decltype(NvAPI_DRS_CreateSession)>("NvAPI_DRS_CreateSession", phSession);
}

NVAPI_INTERFACE
NvAPI_DRS_DestroySession(NvDRSSessionHandle hSession) {
  return call_interface<decltype(NvAPI_DRS_DestroySession)>("NvAPI_DRS_DestroySession", hSession);
}

NVAPI_INTERFACE
NvAPI_DRS_LoadSettings(NvDRSSessionHandle hSession) {
  return call_interface<decltype(NvAPI_DRS_LoadSettings)>("NvAPI_DRS_LoadSettings", hSession);
}

NVAPI_INTERFACE
NvAPI_DRS_SaveSettings(NvDRSSessionHandle hSession) {
  return call_interface<decltype(NvAPI_DRS_SaveSettings)>("NvAPI_DRS_SaveSettings", hSession);
}

NVAPI_INTERFACE
NvAPI_DRS_CreateProfile(NvDRSSessionHandle hSession, NVDRS_PROFILE *pProfileInfo, NvDRSProfileHandle *phProfile) {
  return call_interface<decltype(NvAPI_DRS_CreateProfile)>("NvAPI_DRS_CreateProfile", hSession, pProfileInfo, phProfile);
}

NVAPI_INTERFACE
NvAPI_DRS_FindProfileByName(NvDRSSessionHandle hSession, NvAPI_UnicodeString profileName, NvDRSProfileHandle *phProfile) {
  return call_interface<decltype(NvAPI_DRS_FindProfileByName)>("NvAPI_DRS_FindProfileByName", hSession, profileName, phProfile);
}

NVAPI_INTERFACE
NvAPI_DRS_CreateApplication(NvDRSSessionHandle hSession, NvDRSProfileHandle hProfile, NVDRS_APPLICATION *pApplication) {
  return call_interface<decltype(NvAPI_DRS_CreateApplication)>("NvAPI_DRS_CreateApplication", hSession, hProfile, pApplication);
}

NVAPI_INTERFACE
NvAPI_DRS_GetApplicationInfo(NvDRSSessionHandle hSession, NvDRSProfileHandle hProfile, NvAPI_UnicodeString appName, NVDRS_APPLICATION *pApplication) {
  return call_interface<decltype(NvAPI_DRS_GetApplicationInfo)>("NvAPI_DRS_GetApplicationInfo", hSession, hProfile, appName, pApplication);
}

NVAPI_INTERFACE
NvAPI_DRS_SetSetting(NvDRSSessionHandle hSession, NvDRSProfileHandle hProfile, NVDRS_SETTING *pSetting) {
  return call_interface<decltype(NvAPI_DRS_SetSetting)>("NvAPI_DRS_SetSetting", hSession, hProfile, pSetting);
}

NVAPI_INTERFACE
NvAPI_DRS_GetSetting(NvDRSSessionHandle hSession, NvDRSProfileHandle hProfile, NvU32 settingId, NVDRS_SETTING *pSetting) {
  return call_interface<decltype(NvAPI_DRS_GetSetting)>("NvAPI_DRS_GetSetting", hSession, hProfile, settingId, pSetting);
}

NVAPI_INTERFACE
NvAPI_DRS_DeleteProfileSetting(NvDRSSessionHandle hSession, NvDRSProfileHandle hProfile, NvU32 settingId) {
  return call_interface<decltype(NvAPI_DRS_DeleteProfileSetting)>("NvAPI_DRS_DeleteProfileSetting", hSession, hProfile, settingId);
}

NVAPI_INTERFACE
NvAPI_DRS_GetBaseProfile(NvDRSSessionHandle hSession, NvDRSProfileHandle *phProfile) {
  return call_interface<decltype(NvAPI_DRS_GetBaseProfile)>("NvAPI_DRS_GetBaseProfile", hSession, phProfile);
}
| 4,557
|
C++
|
.cpp
| 108
| 39.731481
| 150
| 0.791176
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,023
|
nvprefs_interface.cpp
|
LizardByte_Sunshine/src/platform/windows/nvprefs/nvprefs_interface.cpp
|
/**
* @file src/platform/windows/nvprefs/nvprefs_interface.cpp
* @brief Definitions for nvidia preferences interface.
*/
// standard includes
#include <cassert>
// local includes
#include "driver_settings.h"
#include "nvprefs_interface.h"
#include "undo_file.h"
namespace {
const auto sunshine_program_data_folder = "Sunshine";
const auto nvprefs_undo_file_name = "nvprefs_undo.json";
} // namespace
namespace nvprefs {
// Private state for nvprefs_interface (pimpl idiom).
struct nvprefs_interface::impl {
  // True once nvapi is loaded and driver settings are available
  bool loaded = false;
  driver_settings_t driver_settings;
  // %ProgramData%/Sunshine, computed in load()
  std::filesystem::path undo_folder_path;
  // Path of the crash-recovery undo file inside that folder
  std::filesystem::path undo_file_path;
  // In-memory record of the global-profile changes we made (if any)
  std::optional<undo_data_t> undo_data;
  // Open, locked undo file; present only while we own pending changes
  std::optional<undo_file_t> undo_file;
};
nvprefs_interface::nvprefs_interface():
    pimpl(new impl()) {
}

// On destruction, undo our global-profile changes if we still own the undo
// file, then release nvapi.
nvprefs_interface::~nvprefs_interface() {
  if (owning_undo_file() && load()) {
    restore_global_profile();
  }
  unload();
}
/**
 * @brief Resolve the undo-file location and load nvapi driver settings.
 * @return `true` when the interface is ready for use.
 */
bool
nvprefs_interface::load() {
  if (!pimpl->loaded) {
    // Check %ProgramData% variable, need it for storing undo file
    wchar_t program_data_env[MAX_PATH];
    auto get_env_result = GetEnvironmentVariableW(L"ProgramData", program_data_env, MAX_PATH);
    // 0 means the variable is missing; >= MAX_PATH means it was truncated
    if (get_env_result == 0 || get_env_result >= MAX_PATH || !std::filesystem::is_directory(program_data_env)) {
      error_message("Missing or malformed %ProgramData% environment variable");
      return false;
    }
    // Prepare undo file path variables
    pimpl->undo_folder_path = std::filesystem::path(program_data_env) / sunshine_program_data_folder;
    pimpl->undo_file_path = pimpl->undo_folder_path / nvprefs_undo_file_name;
    // Dynamically load nvapi library and load driver settings
    pimpl->loaded = pimpl->driver_settings.init();
  }
  return pimpl->loaded;
}
/**
 * @brief Release nvapi and mark the interface as unloaded.
 */
void
nvprefs_interface::unload() {
  if (!pimpl->loaded) {
    return;
  }
  // Unload the dynamically loaded nvapi library.
  pimpl->driver_settings.destroy();
  pimpl->loaded = false;
}
/**
 * @brief Recover from a previous improper termination: restore settings from a
 *        leftover undo file (if any) and delete it.
 * @return `true` when there was no undo file or it was handled and removed.
 */
bool
nvprefs_interface::restore_from_and_delete_undo_file_if_exists() {
  if (!pimpl->loaded) return false;
  // Check for undo file from previous improper termination
  bool access_denied = false;
  if (auto undo_file = undo_file_t::open_existing_file(pimpl->undo_file_path, access_denied)) {
    // Try to restore from the undo file
    info_message("Opened undo file from previous improper termination");
    if (auto undo_data = undo_file->read_undo_data()) {
      if (pimpl->driver_settings.restore_global_profile_to_undo(*undo_data) && pimpl->driver_settings.save_settings()) {
        info_message("Restored global profile settings from undo file - deleting the file");
      }
      else {
        error_message("Failed to restore global profile settings from undo file, deleting the file anyway");
      }
    }
    else {
      // Fixed typo in log message ("Coulnd't" -> "Couldn't")
      error_message("Couldn't read undo file, deleting the file anyway");
    }
    // The file is deleted regardless of restore success so we don't retry forever
    if (!undo_file->delete_file()) {
      error_message("Couldn't delete undo file");
      return false;
    }
  }
  else if (access_denied) {
    error_message("Couldn't open undo file from previous improper termination, or confirm that there's no such file");
    return false;
  }
  return true;
}
/**
 * @brief Adjust the sunshine.exe application profile and persist it when changed.
 * @return `true` on success (including when nothing needed changing).
 */
bool
nvprefs_interface::modify_application_profile() {
  if (!pimpl->loaded) return false;
  bool modified = false;
  if (!pimpl->driver_settings.check_and_modify_application_profile(modified)) {
    error_message("Failed to modify application profile settings");
    return false;
  }
  if (!modified) {
    info_message("No need to modify application profile settings");
    return true;
  }
  // Something changed - commit it to the driver store.
  if (!pimpl->driver_settings.save_settings()) {
    error_message("Couldn't save application profile settings");
    return false;
  }
  info_message("Modified application profile settings");
  return true;
}
/**
 * @brief Modify the global driver profile (if the user opted in), recording
 *        undo data to a crash-recovery file before committing.
 * @return `true` on success (including when nothing needed changing).
 */
bool
nvprefs_interface::modify_global_profile() {
  if (!pimpl->loaded) return false;
  // Modify but not save global profile settings, if needed
  std::optional<undo_data_t> undo_data;
  if (!pimpl->driver_settings.check_and_modify_global_profile(undo_data)) {
    error_message("Couldn't modify global profile settings");
    return false;
  }
  else if (!undo_data) {
    info_message("No need to modify global profile settings");
    return true;
  }
  // Write-ahead protocol: the undo data must be durably on disk BEFORE the
  // modified settings are saved, so a crash can always be rolled back.
  auto make_undo_and_commit = [&]() -> bool {
    // Create and lock undo file if it hasn't been done yet
    if (!pimpl->undo_file) {
      // Prepare Sunshine folder in ProgramData if it doesn't exist
      if (!CreateDirectoryW(pimpl->undo_folder_path.c_str(), nullptr) && GetLastError() != ERROR_ALREADY_EXISTS) {
        error_message("Couldn't create undo folder");
        return false;
      }
      // Create undo file to handle improper termination of nvprefs.exe
      pimpl->undo_file = undo_file_t::create_new_file(pimpl->undo_file_path);
      if (!pimpl->undo_file) {
        error_message("Couldn't create undo file");
        return false;
      }
    }
    assert(undo_data);
    if (pimpl->undo_data) {
      // Merge undo data if settings has been modified externally since our last modification
      pimpl->undo_data->merge(*undo_data);
    }
    else {
      pimpl->undo_data = undo_data;
    }
    // Write undo data to undo file
    if (!pimpl->undo_file->write_undo_data(*pimpl->undo_data)) {
      error_message("Couldn't write to undo file - deleting the file");
      if (!pimpl->undo_file->delete_file()) {
        error_message("Couldn't delete undo file");
      }
      return false;
    }
    // Save global profile settings
    if (!pimpl->driver_settings.save_settings()) {
      error_message("Couldn't save global profile settings");
      return false;
    }
    return true;
  };
  if (!make_undo_and_commit()) {
    // Revert settings modifications
    pimpl->driver_settings.load_settings();
    return false;
  }
  return true;
}
// We own the undo file exactly while the optional holds an open file.
bool
nvprefs_interface::owning_undo_file() {
  return static_cast<bool>(pimpl->undo_file);
}
/**
 * @brief Undo our global-profile modifications and remove the undo file.
 * @return `true` when everything was restored and the undo file deleted.
 */
bool
nvprefs_interface::restore_global_profile() {
  // Nothing to restore unless we're loaded and actually made changes
  if (!pimpl->loaded || !pimpl->undo_data || !pimpl->undo_file) return false;
  // Restore global profile settings with undo data
  if (pimpl->driver_settings.restore_global_profile_to_undo(*pimpl->undo_data) &&
      pimpl->driver_settings.save_settings()) {
    // Global profile settings successfully restored, can delete undo file
    if (!pimpl->undo_file->delete_file()) {
      error_message("Couldn't delete undo file");
      return false;
    }
    pimpl->undo_data = std::nullopt;
    pimpl->undo_file = std::nullopt;
  }
  else {
    error_message("Couldn't restore global profile settings");
    return false;
  }
  return true;
}
} // namespace nvprefs
| 7,043
|
C++
|
.cpp
| 194
| 30.21134
| 122
| 0.655709
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,024
|
undo_data.cpp
|
LizardByte_Sunshine/src/platform/windows/nvprefs/undo_data.cpp
|
/**
* @file src/platform/windows/nvprefs/undo_data.cpp
* @brief Definitions for undoing changes to nvidia preferences.
*/
// external includes
#include <nlohmann/json.hpp>
// local includes
#include "nvprefs_common.h"
#include "undo_data.h"
using json = nlohmann::json;
// Separate namespace for ADL, otherwise we need to define json
// functions in the same namespace as our types
namespace nlohmann {

  using data_t = nvprefs::undo_data_t::data_t;
  using opengl_swapchain_t = data_t::opengl_swapchain_t;

  // std::optional<T> <-> JSON: empty optional maps to JSON null.
  template <typename T>
  struct adl_serializer<std::optional<T>> {
    static void
    to_json(json &j, const std::optional<T> &opt) {
      if (opt == std::nullopt) {
        j = nullptr;
      }
      else {
        j = *opt;
      }
    }

    static void
    from_json(const json &j, std::optional<T> &opt) {
      if (j.is_null()) {
        opt = std::nullopt;
      }
      else {
        opt = j.template get<T>();
      }
    }
  };

  // Top-level undo payload: a single "opengl_swapchain" entry (may be null).
  template <>
  struct adl_serializer<data_t> {
    static void
    to_json(json &j, const data_t &data) {
      j = json { { "opengl_swapchain", data.opengl_swapchain } };
    }

    static void
    from_json(const json &j, data_t &data) {
      j.at("opengl_swapchain").get_to(data.opengl_swapchain);
    }
  };

  // Swapchain undo record: the value we set and the value to restore.
  template <>
  struct adl_serializer<opengl_swapchain_t> {
    static void
    to_json(json &j, const opengl_swapchain_t &opengl_swapchain) {
      j = json {
        { "our_value", opengl_swapchain.our_value },
        { "undo_value", opengl_swapchain.undo_value }
      };
    }

    static void
    from_json(const json &j, opengl_swapchain_t &opengl_swapchain) {
      j.at("our_value").get_to(opengl_swapchain.our_value);
      j.at("undo_value").get_to(opengl_swapchain.undo_value);
    }
  };

}  // namespace nlohmann
namespace nvprefs {
void
undo_data_t::set_opengl_swapchain(uint32_t our_value, std::optional<uint32_t> undo_value) {
data.opengl_swapchain = data_t::opengl_swapchain_t {
our_value,
undo_value
};
}
/**
 * @brief Get the recorded swapchain undo entry, if any.
 * @return A copy of the stored optional record.
 */
std::optional<undo_data_t::data_t::opengl_swapchain_t>
undo_data_t::get_opengl_swapchain() const {
  auto snapshot = data.opengl_swapchain;
  return snapshot;
}
/**
 * @brief Serialize the undo data to a JSON string.
 * @return The JSON document, or an empty string on serialization failure.
 */
std::string
undo_data_t::write() const {
  try {
    // Keep this assignment otherwise data will be treated as an array due to
    // initializer list shenanigans.
    const json json_data = data;
    return json_data.dump();
  }
  catch (const std::exception &err) {
    // Include the exception details, consistent with read()'s error message.
    error_message(std::string { "failed to serialize json data: " } + err.what());
    return {};
  }
}
/**
 * @brief Parse undo data from a raw JSON buffer.
 * @param buffer Bytes of a JSON document.
 *
 * On parse failure, logs the error and resets the data to defaults.
 */
void
undo_data_t::read(const std::vector<char> &buffer) {
  try {
    data = json::parse(buffer.begin(), buffer.end());
  }
  catch (const std::exception &err) {
    error_message(std::string { "failed to parse json data: " } + err.what());
    data = {};
  }
}
/**
 * @brief Fold newer undo data into this record.
 * @param newer_data Data captured after this one.
 *
 * A newer swapchain entry (when present) supersedes the one stored here.
 */
void
undo_data_t::merge(const undo_data_t &newer_data) {
  if (const auto &incoming = newer_data.get_opengl_swapchain()) {
    set_opengl_swapchain(incoming->our_value, incoming->undo_value);
  }
}
} // namespace nvprefs
| 3,115
|
C++
|
.cpp
| 106
| 24.584906
| 93
| 0.638824
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,025
|
nvprefs_common.cpp
|
LizardByte_Sunshine/src/platform/windows/nvprefs/nvprefs_common.cpp
|
/**
* @file src/platform/windows/nvprefs/nvprefs_common.cpp
* @brief Definitions for common nvidia preferences.
*/
// local includes
#include "nvprefs_common.h"
#include "src/logging.h"
// read user override preferences from global sunshine config
#include "src/config.h"
namespace nvprefs {
void
info_message(const std::wstring &message) {
BOOST_LOG(info) << "nvprefs: " << message;
}
void
info_message(const std::string &message) {
BOOST_LOG(info) << "nvprefs: " << message;
}
void
error_message(const std::wstring &message) {
BOOST_LOG(error) << "nvprefs: " << message;
}
void
error_message(const std::string &message) {
BOOST_LOG(error) << "nvprefs: " << message;
}
nvprefs_options
get_nvprefs_options() {
nvprefs_options options;
options.opengl_vulkan_on_dxgi = config::video.nv_opengl_vulkan_on_dxgi;
options.sunshine_high_power_mode = config::video.nv_sunshine_high_power_mode;
return options;
}
} // namespace nvprefs
| 1,000
|
C++
|
.cpp
| 34
| 26.264706
| 81
| 0.705637
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,026
|
undo_file.cpp
|
LizardByte_Sunshine/src/platform/windows/nvprefs/undo_file.cpp
|
/**
* @file src/platform/windows/nvprefs/undo_file.cpp
* @brief Definitions for the nvidia undo file.
*/
// local includes
#include "undo_file.h"
namespace {
using namespace nvprefs;
DWORD
relax_permissions(HANDLE file_handle) {
PACL old_dacl = nullptr;
safe_hlocal<PSECURITY_DESCRIPTOR> sd;
DWORD status = GetSecurityInfo(file_handle, SE_FILE_OBJECT, DACL_SECURITY_INFORMATION, nullptr, nullptr, &old_dacl, nullptr, &sd);
if (status != ERROR_SUCCESS) return status;
safe_sid users_sid;
SID_IDENTIFIER_AUTHORITY nt_authorithy = SECURITY_NT_AUTHORITY;
if (!AllocateAndInitializeSid(&nt_authorithy, 2, SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_USERS, 0, 0, 0, 0, 0, 0, &users_sid)) {
return GetLastError();
}
EXPLICIT_ACCESS ea = {};
ea.grfAccessPermissions = GENERIC_READ | GENERIC_WRITE | DELETE;
ea.grfAccessMode = GRANT_ACCESS;
ea.grfInheritance = NO_INHERITANCE;
ea.Trustee.TrusteeForm = TRUSTEE_IS_SID;
ea.Trustee.ptstrName = (LPTSTR) users_sid.get();
safe_hlocal<PACL> new_dacl;
status = SetEntriesInAcl(1, &ea, old_dacl, &new_dacl);
if (status != ERROR_SUCCESS) return status;
status = SetSecurityInfo(file_handle, SE_FILE_OBJECT, DACL_SECURITY_INFORMATION, nullptr, nullptr, new_dacl.get(), nullptr);
if (status != ERROR_SUCCESS) return status;
return 0;
}
} // namespace
namespace nvprefs {
std::optional<undo_file_t>
undo_file_t::open_existing_file(std::filesystem::path file_path, bool &access_denied) {
undo_file_t file;
file.file_handle.reset(CreateFileW(file_path.c_str(), GENERIC_READ | DELETE, 0, nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL));
if (file.file_handle) {
access_denied = false;
return file;
}
else {
auto last_error = GetLastError();
access_denied = (last_error != ERROR_FILE_NOT_FOUND && last_error != ERROR_PATH_NOT_FOUND);
return std::nullopt;
}
}
std::optional<undo_file_t>
undo_file_t::create_new_file(std::filesystem::path file_path) {
undo_file_t file;
file.file_handle.reset(CreateFileW(file_path.c_str(), GENERIC_WRITE | STANDARD_RIGHTS_ALL, 0, nullptr, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL));
if (file.file_handle) {
// give GENERIC_READ, GENERIC_WRITE and DELETE permissions to Users group
if (relax_permissions(file.file_handle.get()) != 0) {
error_message("Failed to relax permissions on undo file");
}
return file;
}
else {
return std::nullopt;
}
}
bool
undo_file_t::delete_file() {
if (!file_handle) return false;
FILE_DISPOSITION_INFO delete_file_info = { TRUE };
if (SetFileInformationByHandle(file_handle.get(), FileDispositionInfo, &delete_file_info, sizeof(delete_file_info))) {
file_handle.reset();
return true;
}
else {
return false;
}
}
bool
undo_file_t::write_undo_data(const undo_data_t &undo_data) {
if (!file_handle) return false;
std::string buffer;
try {
buffer = undo_data.write();
}
catch (...) {
error_message("Couldn't serialize undo data");
return false;
}
if (!SetFilePointerEx(file_handle.get(), {}, nullptr, FILE_BEGIN) || !SetEndOfFile(file_handle.get())) {
error_message("Couldn't clear undo file");
return false;
}
DWORD bytes_written = 0;
if (!WriteFile(file_handle.get(), buffer.data(), buffer.size(), &bytes_written, nullptr) || bytes_written != buffer.size()) {
error_message("Couldn't write undo file");
return false;
}
if (!FlushFileBuffers(file_handle.get())) {
error_message("Failed to flush undo file");
}
return true;
}
std::optional<undo_data_t>
undo_file_t::read_undo_data() {
if (!file_handle) return std::nullopt;
LARGE_INTEGER file_size;
if (!GetFileSizeEx(file_handle.get(), &file_size)) {
error_message("Couldn't get undo file size");
return std::nullopt;
}
if ((size_t) file_size.QuadPart > 1024) {
error_message("Undo file size is unexpectedly large, aborting");
return std::nullopt;
}
std::vector<char> buffer(file_size.QuadPart);
DWORD bytes_read = 0;
if (!ReadFile(file_handle.get(), buffer.data(), buffer.size(), &bytes_read, nullptr) || bytes_read != buffer.size()) {
error_message("Couldn't read undo file");
return std::nullopt;
}
undo_data_t undo_data;
try {
undo_data.read(buffer);
}
catch (...) {
error_message("Couldn't parse undo file");
return std::nullopt;
}
return undo_data;
}
} // namespace nvprefs
| 4,655
|
C++
|
.cpp
| 129
| 30.937984
| 149
| 0.663628
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,027
|
tests_events.h
|
LizardByte_Sunshine/tests/tests_events.h
|
/**
* @file tests/tests_events.h
* @brief Declarations for SunshineEventListener.
*/
#pragma once
#include "tests_common.h"
struct SunshineEventListener: testing::EmptyTestEventListener {
SunshineEventListener() {
sink = boost::make_shared<sink_t>();
sink_buffer = boost::make_shared<std::stringstream>();
sink->locked_backend()->add_stream(sink_buffer);
sink->set_formatter(&logging::formatter);
}
void
OnTestProgramStart(const testing::UnitTest &unit_test) override {
boost::log::core::get()->add_sink(sink);
}
void
OnTestProgramEnd(const testing::UnitTest &unit_test) override {
boost::log::core::get()->remove_sink(sink);
}
void
OnTestStart(const testing::TestInfo &test_info) override {
BOOST_LOG(tests) << "From " << test_info.file() << ":" << test_info.line();
BOOST_LOG(tests) << " " << test_info.test_suite_name() << "/" << test_info.name() << " started";
}
void
OnTestPartResult(const testing::TestPartResult &test_part_result) override {
std::string file = test_part_result.file_name();
BOOST_LOG(tests) << "At " << file << ":" << test_part_result.line_number();
auto result_text = test_part_result.passed() ? "Success" :
test_part_result.nonfatally_failed() ? "Non-fatal failure" :
test_part_result.fatally_failed() ? "Failure" :
"Skip";
std::string summary = test_part_result.summary();
std::string message = test_part_result.message();
BOOST_LOG(tests) << " " << result_text << ": " << summary;
if (message != summary) {
BOOST_LOG(tests) << " " << message;
}
}
void
OnTestEnd(const testing::TestInfo &test_info) override {
auto &result = *test_info.result();
auto result_text = result.Passed() ? "passed" :
result.Skipped() ? "skipped" :
"failed";
BOOST_LOG(tests) << test_info.test_suite_name() << "/" << test_info.name() << " " << result_text;
if (result.Failed()) {
std::cout << sink_buffer->str();
}
sink_buffer->str("");
sink_buffer->clear();
}
using sink_t = boost::log::sinks::synchronous_sink<boost::log::sinks::text_ostream_backend>;
boost::shared_ptr<sink_t> sink;
boost::shared_ptr<std::stringstream> sink_buffer;
};
| 2,392
|
C++
|
.h
| 58
| 34.517241
| 101
| 0.604651
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,028
|
tests_environment.h
|
LizardByte_Sunshine/tests/tests_environment.h
|
/**
* @file tests/tests_environment.h
* @brief Declarations for SunshineEnvironment.
*/
#pragma once
#include "tests_common.h"
struct SunshineEnvironment: testing::Environment {
void
SetUp() override {
mail::man = std::make_shared<safe::mail_raw_t>();
deinit_log = logging::init(0, "test_sunshine.log");
}
void
TearDown() override {
deinit_log = {};
mail::man = {};
}
std::unique_ptr<logging::deinit_t> deinit_log;
};
| 454
|
C++
|
.h
| 19
| 21
| 55
| 0.678241
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,029
|
tests_log_checker.h
|
LizardByte_Sunshine/tests/tests_log_checker.h
|
/**
* @file tests/tests_log_checker.h
* @brief Utility functions to check log file contents.
*/
#pragma once
#include <algorithm>
#include <fstream>
#include <regex>
#include <string>
#include <src/logging.h>
namespace log_checker {
/**
* @brief Remove the timestamp prefix from a log line.
* @param line The log line.
* @return The log line without the timestamp prefix.
*/
inline std::string
remove_timestamp_prefix(const std::string &line) {
static const std::regex timestamp_regex(R"(\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}\]: )");
return std::regex_replace(line, timestamp_regex, "");
}
/**
* @brief Check if a log file contains a line that starts with the given string.
* @param log_file Path to the log file.
* @param start_str The string that the line should start with.
* @return True if such a line is found, false otherwise.
*/
inline bool
line_starts_with(const std::string &log_file, const std::string_view &start_str) {
logging::log_flush();
std::ifstream input(log_file);
if (!input.is_open()) {
return false;
}
for (std::string line; std::getline(input, line);) {
line = remove_timestamp_prefix(line);
if (line.rfind(start_str, 0) == 0) {
return true;
}
}
return false;
}
/**
* @brief Check if a log file contains a line that ends with the given string.
* @param log_file Path to the log file.
* @param end_str The string that the line should end with.
* @return True if such a line is found, false otherwise.
*/
inline bool
line_ends_with(const std::string &log_file, const std::string_view &end_str) {
logging::log_flush();
std::ifstream input(log_file);
if (!input.is_open()) {
return false;
}
for (std::string line; std::getline(input, line);) {
line = remove_timestamp_prefix(line);
if (line.size() >= end_str.size() &&
line.compare(line.size() - end_str.size(), end_str.size(), end_str) == 0) {
return true;
}
}
return false;
}
/**
* @brief Check if a log file contains a line that equals the given string.
* @param log_file Path to the log file.
* @param str The string that the line should equal.
* @return True if such a line is found, false otherwise.
*/
inline bool
line_equals(const std::string &log_file, const std::string_view &str) {
logging::log_flush();
std::ifstream input(log_file);
if (!input.is_open()) {
return false;
}
for (std::string line; std::getline(input, line);) {
line = remove_timestamp_prefix(line);
if (line == str) {
return true;
}
}
return false;
}
/**
* @brief Check if a log file contains a line that contains the given substring.
* @param log_file Path to the log file.
* @param substr The substring to search for.
* @param case_insensitive Whether the search should be case-insensitive.
* @return True if such a line is found, false otherwise.
*/
inline bool
line_contains(const std::string &log_file, const std::string_view &substr, bool case_insensitive = false) {
logging::log_flush();
std::ifstream input(log_file);
if (!input.is_open()) {
return false;
}
std::string search_str(substr);
if (case_insensitive) {
// sonarcloud complains about this, but the solution doesn't work for macOS-12
std::transform(search_str.begin(), search_str.end(), search_str.begin(), ::tolower);
}
for (std::string line; std::getline(input, line);) {
line = remove_timestamp_prefix(line);
if (case_insensitive) {
// sonarcloud complains about this, but the solution doesn't work for macOS-12
std::transform(line.begin(), line.end(), line.begin(), ::tolower);
}
if (line.find(search_str) != std::string::npos) {
return true;
}
}
return false;
}
} // namespace log_checker
| 3,963
|
C++
|
.h
| 117
| 29.059829
| 109
| 0.644201
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,030
|
tests_common.h
|
LizardByte_Sunshine/tests/tests_common.h
|
/**
* @file tests/tests_common.h
* @brief Common declarations.
*/
#pragma once
#include <gtest/gtest.h>
#include <src/globals.h>
#include <src/logging.h>
#include <src/platform/common.h>
struct PlatformTestSuite: testing::Test {
static void
SetUpTestSuite() {
ASSERT_FALSE(platf_deinit);
BOOST_LOG(tests) << "Setting up platform test suite";
platf_deinit = platf::init();
ASSERT_TRUE(platf_deinit);
}
static void
TearDownTestSuite() {
ASSERT_TRUE(platf_deinit);
platf_deinit = {};
BOOST_LOG(tests) << "Tore down platform test suite";
}
private:
inline static std::unique_ptr<platf::deinit_t> platf_deinit;
};
| 658
|
C++
|
.h
| 26
| 22.423077
| 62
| 0.707006
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,031
|
stream.h
|
LizardByte_Sunshine/src/stream.h
|
/**
* @file src/stream.h
* @brief Declarations for the streaming protocols.
*/
#pragma once
#include <utility>
#include <boost/asio.hpp>
#include "audio.h"
#include "crypto.h"
#include "video.h"
namespace stream {
constexpr auto VIDEO_STREAM_PORT = 9;
constexpr auto CONTROL_PORT = 10;
constexpr auto AUDIO_STREAM_PORT = 11;
struct session_t;
struct config_t {
audio::config_t audio;
video::config_t monitor;
int packetsize;
int minRequiredFecPackets;
int mlFeatureFlags;
int controlProtocolType;
int audioQosType;
int videoQosType;
uint32_t encryptionFlagsEnabled;
std::optional<int> gcmap;
};
namespace session {
enum class state_e : int {
STOPPED, ///< The session is stopped
STOPPING, ///< The session is stopping
STARTING, ///< The session is starting
RUNNING, ///< The session is running
};
std::shared_ptr<session_t>
alloc(config_t &config, rtsp_stream::launch_session_t &launch_session);
int
start(session_t &session, const std::string &addr_string);
void
stop(session_t &session);
void
join(session_t &session);
state_e
state(session_t &session);
} // namespace session
} // namespace stream
| 1,247
|
C++
|
.h
| 46
| 23.065217
| 75
| 0.689597
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,032
|
crypto.h
|
LizardByte_Sunshine/src/crypto.h
|
/**
* @file src/crypto.h
* @brief Declarations for cryptography functions.
*/
#pragma once
#include <array>
#include <openssl/evp.h>
#include <openssl/rand.h>
#include <openssl/sha.h>
#include <openssl/x509.h>
#include "utility.h"
namespace crypto {
struct creds_t {
std::string x509;
std::string pkey;
};
void
md_ctx_destroy(EVP_MD_CTX *);
using sha256_t = std::array<std::uint8_t, SHA256_DIGEST_LENGTH>;
using aes_t = std::vector<std::uint8_t>;
using x509_t = util::safe_ptr<X509, X509_free>;
using x509_store_t = util::safe_ptr<X509_STORE, X509_STORE_free>;
using x509_store_ctx_t = util::safe_ptr<X509_STORE_CTX, X509_STORE_CTX_free>;
using cipher_ctx_t = util::safe_ptr<EVP_CIPHER_CTX, EVP_CIPHER_CTX_free>;
using md_ctx_t = util::safe_ptr<EVP_MD_CTX, md_ctx_destroy>;
using bio_t = util::safe_ptr<BIO, BIO_free_all>;
using pkey_t = util::safe_ptr<EVP_PKEY, EVP_PKEY_free>;
using pkey_ctx_t = util::safe_ptr<EVP_PKEY_CTX, EVP_PKEY_CTX_free>;
using bignum_t = util::safe_ptr<BIGNUM, BN_free>;
/**
* @brief Hashes the given plaintext using SHA-256.
* @param plaintext
* @return The SHA-256 hash of the plaintext.
*/
sha256_t
hash(const std::string_view &plaintext);
aes_t
gen_aes_key(const std::array<uint8_t, 16> &salt, const std::string_view &pin);
x509_t
x509(const std::string_view &x);
pkey_t
pkey(const std::string_view &k);
std::string
pem(x509_t &x509);
std::string
pem(pkey_t &pkey);
std::vector<uint8_t>
sign256(const pkey_t &pkey, const std::string_view &data);
bool
verify256(const x509_t &x509, const std::string_view &data, const std::string_view &signature);
creds_t
gen_creds(const std::string_view &cn, std::uint32_t key_bits);
std::string_view
signature(const x509_t &x);
std::string
rand(std::size_t bytes);
std::string
rand_alphabet(std::size_t bytes,
const std::string_view &alphabet = std::string_view { "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!%&()=-" });
class cert_chain_t {
public:
KITTY_DECL_CONSTR(cert_chain_t)
void
add(x509_t &&cert);
void
clear();
const char *
verify(x509_t::element_type *cert);
private:
std::vector<std::pair<x509_t, x509_store_t>> _certs;
x509_store_ctx_t _cert_ctx;
};
namespace cipher {
constexpr std::size_t tag_size = 16;
constexpr std::size_t
round_to_pkcs7_padded(std::size_t size) {
return ((size + 15) / 16) * 16;
}
class cipher_t {
public:
cipher_ctx_t decrypt_ctx;
cipher_ctx_t encrypt_ctx;
aes_t key;
bool padding;
};
class ecb_t: public cipher_t {
public:
ecb_t() = default;
ecb_t(ecb_t &&) noexcept = default;
ecb_t &
operator=(ecb_t &&) noexcept = default;
ecb_t(const aes_t &key, bool padding = true);
int
encrypt(const std::string_view &plaintext, std::vector<std::uint8_t> &cipher);
int
decrypt(const std::string_view &cipher, std::vector<std::uint8_t> &plaintext);
};
class gcm_t: public cipher_t {
public:
gcm_t() = default;
gcm_t(gcm_t &&) noexcept = default;
gcm_t &
operator=(gcm_t &&) noexcept = default;
gcm_t(const crypto::aes_t &key, bool padding = true);
/**
* @brief Encrypts the plaintext using AES GCM mode.
* @param plaintext The plaintext data to be encrypted.
* @param tag The buffer where the GCM tag will be written.
* @param ciphertext The buffer where the resulting ciphertext will be written.
* @param iv The initialization vector to be used for the encryption.
* @return The total length of the ciphertext and GCM tag. Returns -1 in case of an error.
*/
int
encrypt(const std::string_view &plaintext, std::uint8_t *tag, std::uint8_t *ciphertext, aes_t *iv);
/**
* @brief Encrypts the plaintext using AES GCM mode.
* length of cipher must be at least: round_to_pkcs7_padded(plaintext.size()) + crypto::cipher::tag_size
* @param plaintext The plaintext data to be encrypted.
* @param tagged_cipher The buffer where the resulting ciphertext and GCM tag will be written.
* @param iv The initialization vector to be used for the encryption.
* @return The total length of the ciphertext and GCM tag written into tagged_cipher. Returns -1 in case of an error.
*/
int
encrypt(const std::string_view &plaintext, std::uint8_t *tagged_cipher, aes_t *iv);
int
decrypt(const std::string_view &cipher, std::vector<std::uint8_t> &plaintext, aes_t *iv);
};
class cbc_t: public cipher_t {
public:
cbc_t() = default;
cbc_t(cbc_t &&) noexcept = default;
cbc_t &
operator=(cbc_t &&) noexcept = default;
cbc_t(const crypto::aes_t &key, bool padding = true);
/**
* @brief Encrypts the plaintext using AES CBC mode.
* length of cipher must be at least: round_to_pkcs7_padded(plaintext.size())
* @param plaintext The plaintext data to be encrypted.
* @param cipher The buffer where the resulting ciphertext will be written.
* @param iv The initialization vector to be used for the encryption.
* @return The total length of the ciphertext written into cipher. Returns -1 in case of an error.
*/
int
encrypt(const std::string_view &plaintext, std::uint8_t *cipher, aes_t *iv);
};
} // namespace cipher
} // namespace crypto
| 5,525
|
C++
|
.h
| 147
| 32.442177
| 133
| 0.661366
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,033
|
upnp.h
|
LizardByte_Sunshine/src/upnp.h
|
/**
* @file src/upnp.h
* @brief Declarations for UPnP port mapping.
*/
#pragma once
#include <miniupnpc/miniupnpc.h>
#include "platform/common.h"
/**
* @brief UPnP port mapping.
*/
namespace upnp {
constexpr auto INET6_ADDRESS_STRLEN = 46;
constexpr auto IPv4 = 0;
constexpr auto IPv6 = 1;
constexpr auto PORT_MAPPING_LIFETIME = 3600s;
constexpr auto REFRESH_INTERVAL = 120s;
using device_t = util::safe_ptr<UPNPDev, freeUPNPDevlist>;
KITTY_USING_MOVE_T(urls_t, UPNPUrls, , {
FreeUPNPUrls(&el);
});
/**
* @brief Get the valid IGD status.
* @param device The device.
* @param urls The URLs.
* @param data The IGD data.
* @param lan_addr The LAN address.
* @return The UPnP Status.
* @retval 0 No IGD found.
* @retval 1 A valid connected IGD has been found.
* @retval 2 A valid IGD has been found but it reported as not connected.
* @retval 3 An UPnP device has been found but was not recognized as an IGD.
*/
int
UPNP_GetValidIGDStatus(device_t &device, urls_t *urls, IGDdatas *data, std::array<char, INET6_ADDRESS_STRLEN> &lan_addr);
[[nodiscard]] std::unique_ptr<platf::deinit_t>
start();
} // namespace upnp
| 1,186
|
C++
|
.h
| 37
| 29.027027
| 123
| 0.69965
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,034
|
video.h
|
LizardByte_Sunshine/src/video.h
|
/**
* @file src/video.h
* @brief Declarations for video.
*/
#pragma once
#include "input.h"
#include "platform/common.h"
#include "thread_safe.h"
#include "video_colorspace.h"
extern "C" {
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}
struct AVPacket;
namespace video {
/* Encoding configuration requested by remote client */
struct config_t {
int width; // Video width in pixels
int height; // Video height in pixels
int framerate; // Requested framerate, used in individual frame bitrate budget calculation
int bitrate; // Video bitrate in kilobits (1000 bits) for requested framerate
int slicesPerFrame; // Number of slices per frame
int numRefFrames; // Max number of reference frames
/* Requested color range and SDR encoding colorspace, HDR encoding colorspace is always BT.2020+ST2084
Color range (encoderCscMode & 0x1) : 0 - limited, 1 - full
SDR encoding colorspace (encoderCscMode >> 1) : 0 - BT.601, 1 - BT.709, 2 - BT.2020 */
int encoderCscMode;
int videoFormat; // 0 - H.264, 1 - HEVC, 2 - AV1
/* Encoding color depth (bit depth): 0 - 8-bit, 1 - 10-bit
HDR encoding activates when color depth is higher than 8-bit and the display which is being captured is operating in HDR mode */
int dynamicRange;
int chromaSamplingType; // 0 - 4:2:0, 1 - 4:4:4
};
platf::mem_type_e
map_base_dev_type(AVHWDeviceType type);
platf::pix_fmt_e
map_pix_fmt(AVPixelFormat fmt);
void
free_ctx(AVCodecContext *ctx);
void
free_frame(AVFrame *frame);
void
free_buffer(AVBufferRef *ref);
using avcodec_ctx_t = util::safe_ptr<AVCodecContext, free_ctx>;
using avcodec_frame_t = util::safe_ptr<AVFrame, free_frame>;
using avcodec_buffer_t = util::safe_ptr<AVBufferRef, free_buffer>;
using sws_t = util::safe_ptr<SwsContext, sws_freeContext>;
using img_event_t = std::shared_ptr<safe::event_t<std::shared_ptr<platf::img_t>>>;
struct encoder_platform_formats_t {
virtual ~encoder_platform_formats_t() = default;
platf::mem_type_e dev_type;
platf::pix_fmt_e pix_fmt_8bit, pix_fmt_10bit;
platf::pix_fmt_e pix_fmt_yuv444_8bit, pix_fmt_yuv444_10bit;
};
struct encoder_platform_formats_avcodec: encoder_platform_formats_t {
using init_buffer_function_t = std::function<util::Either<avcodec_buffer_t, int>(platf::avcodec_encode_device_t *)>;
encoder_platform_formats_avcodec(
const AVHWDeviceType &avcodec_base_dev_type,
const AVHWDeviceType &avcodec_derived_dev_type,
const AVPixelFormat &avcodec_dev_pix_fmt,
const AVPixelFormat &avcodec_pix_fmt_8bit,
const AVPixelFormat &avcodec_pix_fmt_10bit,
const AVPixelFormat &avcodec_pix_fmt_yuv444_8bit,
const AVPixelFormat &avcodec_pix_fmt_yuv444_10bit,
const init_buffer_function_t &init_avcodec_hardware_input_buffer_function):
avcodec_base_dev_type { avcodec_base_dev_type },
avcodec_derived_dev_type { avcodec_derived_dev_type },
avcodec_dev_pix_fmt { avcodec_dev_pix_fmt },
avcodec_pix_fmt_8bit { avcodec_pix_fmt_8bit },
avcodec_pix_fmt_10bit { avcodec_pix_fmt_10bit },
avcodec_pix_fmt_yuv444_8bit { avcodec_pix_fmt_yuv444_8bit },
avcodec_pix_fmt_yuv444_10bit { avcodec_pix_fmt_yuv444_10bit },
init_avcodec_hardware_input_buffer { init_avcodec_hardware_input_buffer_function } {
dev_type = map_base_dev_type(avcodec_base_dev_type);
pix_fmt_8bit = map_pix_fmt(avcodec_pix_fmt_8bit);
pix_fmt_10bit = map_pix_fmt(avcodec_pix_fmt_10bit);
pix_fmt_yuv444_8bit = map_pix_fmt(avcodec_pix_fmt_yuv444_8bit);
pix_fmt_yuv444_10bit = map_pix_fmt(avcodec_pix_fmt_yuv444_10bit);
}
AVHWDeviceType avcodec_base_dev_type, avcodec_derived_dev_type;
AVPixelFormat avcodec_dev_pix_fmt;
AVPixelFormat avcodec_pix_fmt_8bit, avcodec_pix_fmt_10bit;
AVPixelFormat avcodec_pix_fmt_yuv444_8bit, avcodec_pix_fmt_yuv444_10bit;
init_buffer_function_t init_avcodec_hardware_input_buffer;
};
struct encoder_platform_formats_nvenc: encoder_platform_formats_t {
encoder_platform_formats_nvenc(
const platf::mem_type_e &dev_type,
const platf::pix_fmt_e &pix_fmt_8bit,
const platf::pix_fmt_e &pix_fmt_10bit,
const platf::pix_fmt_e &pix_fmt_yuv444_8bit,
const platf::pix_fmt_e &pix_fmt_yuv444_10bit) {
encoder_platform_formats_t::dev_type = dev_type;
encoder_platform_formats_t::pix_fmt_8bit = pix_fmt_8bit;
encoder_platform_formats_t::pix_fmt_10bit = pix_fmt_10bit;
encoder_platform_formats_t::pix_fmt_yuv444_8bit = pix_fmt_yuv444_8bit;
encoder_platform_formats_t::pix_fmt_yuv444_10bit = pix_fmt_yuv444_10bit;
}
};
struct encoder_t {
std::string_view name;
enum flag_e {
PASSED, ///< Indicates the encoder is supported.
REF_FRAMES_RESTRICT, ///< Set maximum reference frames.
DYNAMIC_RANGE, ///< HDR support.
YUV444, ///< YUV 4:4:4 support.
VUI_PARAMETERS, ///< AMD encoder with VAAPI doesn't add VUI parameters to SPS.
MAX_FLAGS ///< Maximum number of flags.
};
static std::string_view
from_flag(flag_e flag) {
#define _CONVERT(x) \
case flag_e::x: \
return std::string_view(#x)
switch (flag) {
_CONVERT(PASSED);
_CONVERT(REF_FRAMES_RESTRICT);
_CONVERT(DYNAMIC_RANGE);
_CONVERT(YUV444);
_CONVERT(VUI_PARAMETERS);
_CONVERT(MAX_FLAGS);
}
#undef _CONVERT
return { "unknown" };
}
struct option_t {
KITTY_DEFAULT_CONSTR_MOVE(option_t)
option_t(const option_t &) = default;
std::string name;
std::variant<int, int *, std::optional<int> *, std::function<int()>, std::string, std::string *> value;
option_t(std::string &&name, decltype(value) &&value):
name { std::move(name) }, value { std::move(value) } {}
};
const std::unique_ptr<const encoder_platform_formats_t> platform_formats;
struct codec_t {
std::vector<option_t> common_options;
std::vector<option_t> sdr_options;
std::vector<option_t> hdr_options;
std::vector<option_t> sdr444_options;
std::vector<option_t> hdr444_options;
std::vector<option_t> fallback_options;
std::string name;
std::bitset<MAX_FLAGS> capabilities;
bool
operator[](flag_e flag) const {
return capabilities[(std::size_t) flag];
}
std::bitset<MAX_FLAGS>::reference
operator[](flag_e flag) {
return capabilities[(std::size_t) flag];
}
} av1, hevc, h264;
const codec_t &
codec_from_config(const config_t &config) const {
switch (config.videoFormat) {
default:
BOOST_LOG(error) << "Unknown video format " << config.videoFormat << ", falling back to H.264";
// fallthrough
case 0:
return h264;
case 1:
return hevc;
case 2:
return av1;
}
}
uint32_t flags;
};
struct encode_session_t {
virtual ~encode_session_t() = default;
virtual int
convert(platf::img_t &img) = 0;
virtual void
request_idr_frame() = 0;
virtual void
request_normal_frame() = 0;
virtual void
invalidate_ref_frames(int64_t first_frame, int64_t last_frame) = 0;
};
// encoders
extern encoder_t software;
#if !defined(__APPLE__)
extern encoder_t nvenc; // available for windows and linux
#endif
#ifdef _WIN32
extern encoder_t amdvce;
extern encoder_t quicksync;
#endif
#ifdef __linux__
extern encoder_t vaapi;
#endif
#ifdef __APPLE__
extern encoder_t videotoolbox;
#endif
struct packet_raw_t {
virtual ~packet_raw_t() = default;
virtual bool
is_idr() = 0;
virtual int64_t
frame_index() = 0;
virtual uint8_t *
data() = 0;
virtual size_t
data_size() = 0;
struct replace_t {
std::string_view old;
std::string_view _new;
KITTY_DEFAULT_CONSTR_MOVE(replace_t)
replace_t(std::string_view old, std::string_view _new) noexcept:
old { std::move(old) }, _new { std::move(_new) } {}
};
std::vector<replace_t> *replacements = nullptr;
void *channel_data = nullptr;
bool after_ref_frame_invalidation = false;
std::optional<std::chrono::steady_clock::time_point> frame_timestamp;
};
struct packet_raw_avcodec: packet_raw_t {
packet_raw_avcodec() {
av_packet = av_packet_alloc();
}
~packet_raw_avcodec() {
av_packet_free(&this->av_packet);
}
bool
is_idr() override {
return av_packet->flags & AV_PKT_FLAG_KEY;
}
int64_t
frame_index() override {
return av_packet->pts;
}
uint8_t *
data() override {
return av_packet->data;
}
size_t
data_size() override {
return av_packet->size;
}
AVPacket *av_packet;
};
struct packet_raw_generic: packet_raw_t {
packet_raw_generic(std::vector<uint8_t> &&frame_data, int64_t frame_index, bool idr):
frame_data { std::move(frame_data) }, index { frame_index }, idr { idr } {
}
bool
is_idr() override {
return idr;
}
int64_t
frame_index() override {
return index;
}
uint8_t *
data() override {
return frame_data.data();
}
size_t
data_size() override {
return frame_data.size();
}
std::vector<uint8_t> frame_data;
int64_t index;
bool idr;
};
using packet_t = std::unique_ptr<packet_raw_t>;
struct hdr_info_raw_t {
explicit hdr_info_raw_t(bool enabled):
enabled { enabled }, metadata {} {};
explicit hdr_info_raw_t(bool enabled, const SS_HDR_METADATA &metadata):
enabled { enabled }, metadata { metadata } {};
bool enabled;
SS_HDR_METADATA metadata;
};
using hdr_info_t = std::unique_ptr<hdr_info_raw_t>;
extern int active_hevc_mode;
extern int active_av1_mode;
extern bool last_encoder_probe_supported_ref_frames_invalidation;
extern std::array<bool, 3> last_encoder_probe_supported_yuv444_for_codec; // 0 - H.264, 1 - HEVC, 2 - AV1
void
capture(
safe::mail_t mail,
config_t config,
void *channel_data);
bool
validate_encoder(encoder_t &encoder, bool expect_failure);
/**
* @brief Probe encoders and select the preferred encoder.
* This is called once at startup and each time a stream is launched to
* ensure the best encoder is selected. Encoder availability can change
* at runtime due to all sorts of things from driver updates to eGPUs.
*
* @warning This is only safe to call when there is no client actively streaming.
*/
int
probe_encoders();
} // namespace video
| 10,711
|
C++
|
.h
| 297
| 30.643098
| 135
| 0.665507
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,035
|
audio.h
|
LizardByte_Sunshine/src/audio.h
|
/**
* @file src/audio.h
* @brief Declarations for audio capture and encoding.
*/
#pragma once
#include "thread_safe.h"
#include "utility.h"
#include <bitset>
namespace audio {
enum stream_config_e : int {
STEREO, ///< Stereo
HIGH_STEREO, ///< High stereo
SURROUND51, ///< Surround 5.1
HIGH_SURROUND51, ///< High surround 5.1
SURROUND71, ///< Surround 7.1
HIGH_SURROUND71, ///< High surround 7.1
MAX_STREAM_CONFIG ///< Maximum audio stream configuration
};
struct opus_stream_config_t {
std::int32_t sampleRate;
int channelCount;
int streams;
int coupledStreams;
const std::uint8_t *mapping;
int bitrate;
};
struct stream_params_t {
int channelCount;
int streams;
int coupledStreams;
std::uint8_t mapping[8];
};
extern opus_stream_config_t stream_configs[MAX_STREAM_CONFIG];
struct config_t {
enum flags_e : int {
HIGH_QUALITY, ///< High quality audio
HOST_AUDIO, ///< Host audio
CUSTOM_SURROUND_PARAMS, ///< Custom surround parameters
MAX_FLAGS ///< Maximum number of flags
};
int packetDuration;
int channels;
int mask;
stream_params_t customStreamParams;
std::bitset<MAX_FLAGS> flags;
};
using buffer_t = util::buffer_t<std::uint8_t>;
using packet_t = std::pair<void *, buffer_t>;
void
capture(safe::mail_t mail, config_t config, void *channel_data);
} // namespace audio
| 1,446
|
C++
|
.h
| 51
| 24.215686
| 66
| 0.666908
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,037
|
sync.h
|
LizardByte_Sunshine/src/sync.h
|
/**
* @file src/sync.h
* @brief Declarations for synchronization utilities.
*/
#pragma once
#include <array>
#include <mutex>
#include <utility>
namespace sync_util {
template <class T, class M = std::mutex>
class sync_t {
public:
using value_t = T;
using mutex_t = M;
std::lock_guard<mutex_t>
lock() {
return std::lock_guard { _lock };
}
template <class... Args>
sync_t(Args &&...args):
raw { std::forward<Args>(args)... } {}
sync_t &
operator=(sync_t &&other) noexcept {
std::lock(_lock, other._lock);
raw = std::move(other.raw);
_lock.unlock();
other._lock.unlock();
return *this;
}
sync_t &
operator=(sync_t &other) noexcept {
std::lock(_lock, other._lock);
raw = other.raw;
_lock.unlock();
other._lock.unlock();
return *this;
}
template <class V>
sync_t &
operator=(V &&val) {
auto lg = lock();
raw = val;
return *this;
}
sync_t &
operator=(const value_t &val) noexcept {
auto lg = lock();
raw = val;
return *this;
}
sync_t &
operator=(value_t &&val) noexcept {
auto lg = lock();
raw = std::move(val);
return *this;
}
value_t *
operator->() {
return &raw;
}
value_t &
operator*() {
return raw;
}
const value_t &
operator*() const {
return raw;
}
value_t raw;
private:
mutex_t _lock;
};
} // namespace sync_util
| 1,540
|
C++
|
.h
| 73
| 15.671233
| 53
| 0.547603
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,038
|
globals.h
|
LizardByte_Sunshine/src/globals.h
|
/**
* @file globals.h
* @brief Declarations for globally accessible variables and functions.
*/
#pragma once
#include "entry_handler.h"
#include "thread_pool.h"
/**
* @brief A thread pool for processing tasks.
*/
extern thread_pool_util::ThreadPool task_pool;
/**
* @brief A boolean flag to indicate whether the cursor should be displayed.
*/
extern bool display_cursor;
#ifdef _WIN32
// Declare global singleton used for NVIDIA control panel modifications
#include "platform/windows/nvprefs/nvprefs_interface.h"
/**
* @brief A global singleton used for NVIDIA control panel modifications.
*/
extern nvprefs::nvprefs_interface nvprefs_instance;
#endif
/**
* @brief Handles process-wide communication.
*/
namespace mail {
#define MAIL(x) \
constexpr auto x = std::string_view { \
#x \
}
/**
* @brief A process-wide communication mechanism.
*/
extern safe::mail_t man;
// Global mail
MAIL(shutdown);
MAIL(broadcast_shutdown);
MAIL(video_packets);
MAIL(audio_packets);
MAIL(switch_display);
// Local mail
MAIL(touch_port);
MAIL(idr);
MAIL(invalidate_ref_frames);
MAIL(gamepad_feedback);
MAIL(hdr);
#undef MAIL
} // namespace mail
| 1,252
|
C++
|
.h
| 49
| 23.183673
| 76
| 0.699916
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,039
|
video_colorspace.h
|
LizardByte_Sunshine/src/video_colorspace.h
|
/**
* @file src/video_colorspace.h
* @brief Declarations for colorspace functions.
*/
#pragma once
extern "C" {
#include <libavutil/pixfmt.h>
}
namespace video {
enum class colorspace_e {
rec601, ///< Rec. 601
rec709, ///< Rec. 709
bt2020sdr, ///< Rec. 2020 SDR
bt2020, ///< Rec. 2020 HDR
};
struct sunshine_colorspace_t {
colorspace_e colorspace;
bool full_range;
unsigned bit_depth;
};
bool
colorspace_is_hdr(const sunshine_colorspace_t &colorspace);
// Declared in video.h
struct config_t;
sunshine_colorspace_t
colorspace_from_client_config(const config_t &config, bool hdr_display);
struct avcodec_colorspace_t {
AVColorPrimaries primaries;
AVColorTransferCharacteristic transfer_function;
AVColorSpace matrix;
AVColorRange range;
int software_format;
};
avcodec_colorspace_t
avcodec_colorspace_from_sunshine_colorspace(const sunshine_colorspace_t &sunshine_colorspace);
struct alignas(16) color_t {
float color_vec_y[4];
float color_vec_u[4];
float color_vec_v[4];
float range_y[2];
float range_uv[2];
};
const color_t *
color_vectors_from_colorspace(const sunshine_colorspace_t &colorspace);
const color_t *
color_vectors_from_colorspace(colorspace_e colorspace, bool full_range);
/**
* @brief New version of `color_vectors_from_colorspace()` function that better adheres to the standards.
* Returned vectors are used to perform RGB->YUV conversion.
* Unlike its predecessor, color vectors will produce output in `UINT` range, not `UNORM` range.
* Input is still in `UNORM` range. Returned vectors won't modify color primaries and color
* transfer function.
* @param colorspace Targeted YUV colorspace.
* @return `const color_t*` that contains RGB->YUV transformation vectors.
* Components `range_y` and `range_uv` are there for backwards compatibility
* and can be ignored in the computation.
*/
const color_t *
new_color_vectors_from_colorspace(const sunshine_colorspace_t &colorspace);
} // namespace video
| 2,199
|
C++
|
.h
| 60
| 31.766667
| 108
| 0.6938
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,040
|
nvhttp.h
|
LizardByte_Sunshine/src/nvhttp.h
|
/**
* @file src/nvhttp.h
* @brief Declarations for the nvhttp (GameStream) server.
*/
// macros
#pragma once
// standard includes
#include <string>
// lib includes
#include <boost/property_tree/ptree.hpp>
// local includes
#include "thread_safe.h"
/**
* @brief Contains all the functions and variables related to the nvhttp (GameStream) server.
*/
namespace nvhttp {
/**
* @brief The protocol version.
* @details The version of the GameStream protocol we are mocking.
* @note The negative 4th number indicates to Moonlight that this is Sunshine.
*/
constexpr auto VERSION = "7.1.431.-1";
/**
* @brief The GFE version we are replicating.
*/
constexpr auto GFE_VERSION = "3.23.0.74";
/**
* @brief The HTTP port, as a difference from the config port.
*/
constexpr auto PORT_HTTP = 0;
/**
* @brief The HTTPS port, as a difference from the config port.
*/
constexpr auto PORT_HTTPS = -5;
/**
* @brief Start the nvhttp server.
* @examples
* nvhttp::start();
* @examples_end
*/
void
start();
/**
* @brief Compare the user supplied pin to the Moonlight pin.
* @param pin The user supplied pin.
* @param name The user supplied name.
* @return `true` if the pin is correct, `false` otherwise.
* @examples
* bool pin_status = nvhttp::pin("1234", "laptop");
* @examples_end
*/
bool
pin(std::string pin, std::string name);
/**
* @brief Remove single client.
* @examples
* nvhttp::unpair_client("4D7BB2DD-5704-A405-B41C-891A022932E1");
* @examples_end
*/
int
unpair_client(std::string uniqueid);
/**
* @brief Get all paired clients.
* @return The list of all paired clients.
* @examples
* boost::property_tree::ptree clients = nvhttp::get_all_clients();
* @examples_end
*/
boost::property_tree::ptree
get_all_clients();
/**
* @brief Remove all paired clients.
* @examples
* nvhttp::erase_all_clients();
* @examples_end
*/
void
erase_all_clients();
} // namespace nvhttp
| 2,038
|
C++
|
.h
| 79
| 22.506329
| 93
| 0.669065
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,041
|
thread_pool.h
|
LizardByte_Sunshine/src/thread_pool.h
|
/**
* @file src/thread_pool.h
* @brief Declarations for the thread pool system.
*/
#pragma once
#include "task_pool.h"
#include <thread>
namespace thread_pool_util {
/**
* Allow threads to execute unhindered while keeping full control over the threads.
*/
class ThreadPool: public task_pool_util::TaskPool {
public:
typedef TaskPool::__task __task;
private:
std::vector<std::thread> _thread;
std::condition_variable _cv;
std::mutex _lock;
bool _continue;
public:
ThreadPool():
_continue { false } {}
explicit ThreadPool(int threads):
_thread(threads), _continue { true } {
for (auto &t : _thread) {
t = std::thread(&ThreadPool::_main, this);
}
}
~ThreadPool() noexcept {
if (!_continue) return;
stop();
join();
}
template <class Function, class... Args>
auto
push(Function &&newTask, Args &&...args) {
std::lock_guard lg(_lock);
auto future = TaskPool::push(std::forward<Function>(newTask), std::forward<Args>(args)...);
_cv.notify_one();
return future;
}
void
pushDelayed(std::pair<__time_point, __task> &&task) {
std::lock_guard lg(_lock);
TaskPool::pushDelayed(std::move(task));
}
template <class Function, class X, class Y, class... Args>
auto
pushDelayed(Function &&newTask, std::chrono::duration<X, Y> duration, Args &&...args) {
std::lock_guard lg(_lock);
auto future = TaskPool::pushDelayed(std::forward<Function>(newTask), duration, std::forward<Args>(args)...);
// Update all timers for wait_until
_cv.notify_all();
return future;
}
void
start(int threads) {
_continue = true;
_thread.resize(threads);
for (auto &t : _thread) {
t = std::thread(&ThreadPool::_main, this);
}
}
void
stop() {
std::lock_guard lg(_lock);
_continue = false;
_cv.notify_all();
}
void
join() {
for (auto &t : _thread) {
t.join();
}
}
public:
void
_main() {
while (_continue) {
if (auto task = this->pop()) {
(*task)->run();
}
else {
std::unique_lock uniq_lock(_lock);
if (ready()) {
continue;
}
if (!_continue) {
break;
}
if (auto tp = next()) {
_cv.wait_until(uniq_lock, *tp);
}
else {
_cv.wait(uniq_lock);
}
}
}
// Execute remaining tasks
while (auto task = this->pop()) {
(*task)->run();
}
}
};
} // namespace thread_pool_util
| 2,705
|
C++
|
.h
| 105
| 19.152381
| 114
| 0.547786
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,042
|
stat_trackers.h
|
LizardByte_Sunshine/src/stat_trackers.h
|
/**
* @file src/stat_trackers.h
* @brief Declarations for streaming statistic tracking.
*/
#pragma once
#include <chrono>
#include <functional>
#include <limits>
#include <boost/format.hpp>
namespace stat_trackers {
boost::format
one_digit_after_decimal();
boost::format
two_digits_after_decimal();
template <typename T>
class min_max_avg_tracker {
public:
using callback_function = std::function<void(T stat_min, T stat_max, double stat_avg)>;
void
collect_and_callback_on_interval(T stat, const callback_function &callback, std::chrono::seconds interval_in_seconds) {
if (data.calls == 0) {
data.last_callback_time = std::chrono::steady_clock::now();
}
else if (std::chrono::steady_clock::now() > data.last_callback_time + interval_in_seconds) {
callback(data.stat_min, data.stat_max, data.stat_total / data.calls);
data = {};
}
data.stat_min = std::min(data.stat_min, stat);
data.stat_max = std::max(data.stat_max, stat);
data.stat_total += stat;
data.calls += 1;
}
void
reset() {
data = {};
}
private:
struct {
std::chrono::steady_clock::time_point last_callback_time = std::chrono::steady_clock::now();
T stat_min = std::numeric_limits<T>::max();
T stat_max = std::numeric_limits<T>::min();
double stat_total = 0;
uint32_t calls = 0;
} data;
};
} // namespace stat_trackers
| 1,511
|
C++
|
.h
| 46
| 26.826087
| 124
| 0.625606
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,043
|
system_tray.h
|
LizardByte_Sunshine/src/system_tray.h
|
/**
* @file src/system_tray.h
* @brief Declarations for the system tray icon and notification system.
*/
#pragma once
/**
* @brief Handles the system tray icon and notification system.
*/
namespace system_tray {
/**
* @brief Callback for opening the UI from the system tray.
* @param item The tray menu item.
*/
void
tray_open_ui_cb(struct tray_menu *item);
/**
* @brief Callback for opening GitHub Sponsors from the system tray.
* @param item The tray menu item.
*/
void
tray_donate_github_cb(struct tray_menu *item);
/**
* @brief Callback for opening Patreon from the system tray.
* @param item The tray menu item.
*/
void
tray_donate_patreon_cb(struct tray_menu *item);
/**
* @brief Callback for opening PayPal donation from the system tray.
* @param item The tray menu item.
*/
void
tray_donate_paypal_cb(struct tray_menu *item);
/**
* @brief Callback for restarting Sunshine from the system tray.
* @param item The tray menu item.
*/
void
tray_restart_cb(struct tray_menu *item);
/**
* @brief Callback for exiting Sunshine from the system tray.
* @param item The tray menu item.
*/
void
tray_quit_cb(struct tray_menu *item);
/**
* @brief Create the system tray.
* @details This function has an endless loop, so it should be run in a separate thread.
* @return 1 if the system tray failed to create, otherwise 0 once the tray has been terminated.
*/
int
system_tray();
/**
* @brief Run the system tray with platform specific options.
* @todo macOS requires that UI elements be created on the main thread, so the system tray is not currently implemented for macOS.
*/
int
run_tray();
/**
* @brief Exit the system tray.
* @return 0 after exiting the system tray.
*/
int
end_tray();
/**
* @brief Sets the tray icon in playing mode and spawns the appropriate notification
* @param app_name The started application name
*/
void
update_tray_playing(std::string app_name);
/**
* @brief Sets the tray icon in pausing mode (stream stopped but app running) and spawns the appropriate notification
* @param app_name The paused application name
*/
void
update_tray_pausing(std::string app_name);
/**
* @brief Sets the tray icon in stopped mode (app and stream stopped) and spawns the appropriate notification
* @param app_name The started application name
*/
void
update_tray_stopped(std::string app_name);
/**
* @brief Spawns a notification for PIN Pairing. Clicking it opens the PIN Web UI Page
*/
void
update_tray_require_pin();
} // namespace system_tray
| 2,665
|
C++
|
.h
| 88
| 26.863636
| 132
| 0.703198
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,047
|
entry_handler.h
|
LizardByte_Sunshine/src/entry_handler.h
|
/**
* @file entry_handler.h
* @brief Declarations for entry handling functions.
*/
#pragma once
// standard includes
#include <atomic>
#include <string_view>
// local includes
#include "thread_pool.h"
#include "thread_safe.h"
/**
* @brief Launch the Web UI.
* @examples
* launch_ui();
* @examples_end
*/
void
launch_ui();
/**
* @brief Launch the Web UI at a specific endpoint.
* @examples
* launch_ui_with_path("/pin");
* @examples_end
*/
void
launch_ui_with_path(std::string path);
/**
* @brief Functions for handling command line arguments.
*/
namespace args {
/**
* @brief Reset the user credentials.
* @param name The name of the program.
* @param argc The number of arguments.
* @param argv The arguments.
* @examples
* creds("sunshine", 2, {"new_username", "new_password"});
* @examples_end
*/
int
creds(const char *name, int argc, char *argv[]);
/**
* @brief Print help to stdout, then exit.
* @param name The name of the program.
* @examples
* help("sunshine");
* @examples_end
*/
int
help(const char *name);
/**
* @brief Print the version to stdout, then exit.
* @examples
* version();
* @examples_end
*/
int
version();
#ifdef _WIN32
/**
* @brief Restore global NVIDIA control panel settings.
* If Sunshine was improperly terminated, this function restores
* the global NVIDIA control panel settings to the undo file left
* by Sunshine. This function is typically called by the uninstaller.
* @examples
* restore_nvprefs_undo();
* @examples_end
*/
int
restore_nvprefs_undo();
#endif
} // namespace args
/**
* @brief Functions for handling the lifetime of Sunshine.
*/
namespace lifetime {
extern char **argv;
extern std::atomic_int desired_exit_code;
/**
* @brief Terminates Sunshine gracefully with the provided exit code.
* @param exit_code The exit code to return from main().
* @param async Specifies whether our termination will be non-blocking.
*/
void
exit_sunshine(int exit_code, bool async);
/**
* @brief Breaks into the debugger or terminates Sunshine if no debugger is attached.
*/
void
debug_trap();
/**
* @brief Get the argv array passed to main().
*/
char **
get_argv();
} // namespace lifetime
/**
* @brief Log the publisher metadata provided from CMake.
*/
void
log_publisher_data();
#ifdef _WIN32
/**
* @brief Check if NVIDIA's GameStream software is running.
* @return `true` if GameStream is enabled, `false` otherwise.
*/
bool
is_gamestream_enabled();
/**
* @brief Namespace for controlling the Sunshine service model on Windows.
*/
namespace service_ctrl {
/**
* @brief Check if the service is running.
* @examples
* is_service_running();
* @examples_end
*/
bool
is_service_running();
/**
* @brief Start the service and wait for startup to complete.
* @examples
* start_service();
* @examples_end
*/
bool
start_service();
/**
* @brief Wait for the UI to be ready after Sunshine startup.
* @examples
* wait_for_ui_ready();
* @examples_end
*/
bool
wait_for_ui_ready();
} // namespace service_ctrl
#endif
| 3,190
|
C++
|
.h
| 139
| 20.115108
| 87
| 0.681608
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,048
|
confighttp.h
|
LizardByte_Sunshine/src/confighttp.h
|
/**
* @file src/confighttp.h
* @brief Declarations for the Web UI Config HTTP server.
*/
#pragma once
#include <functional>
#include <string>
#include "thread_safe.h"
#define WEB_DIR SUNSHINE_ASSETS_DIR "/web/"
namespace confighttp {
constexpr auto PORT_HTTPS = 1;
void
start();
} // namespace confighttp
// mime types map
const std::map<std::string, std::string> mime_types = {
{ "css", "text/css" },
{ "gif", "image/gif" },
{ "htm", "text/html" },
{ "html", "text/html" },
{ "ico", "image/x-icon" },
{ "jpeg", "image/jpeg" },
{ "jpg", "image/jpeg" },
{ "js", "application/javascript" },
{ "json", "application/json" },
{ "png", "image/png" },
{ "svg", "image/svg+xml" },
{ "ttf", "font/ttf" },
{ "txt", "text/plain" },
{ "woff2", "font/woff2" },
{ "xml", "text/xml" },
};
| 819
|
C++
|
.h
| 32
| 23.21875
| 57
| 0.597187
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,049
|
file_handler.h
|
LizardByte_Sunshine/src/file_handler.h
|
/**
* @file file_handler.h
* @brief Declarations for file handling functions.
*/
#pragma once
#include <string>
/**
* @brief Responsible for file handling functions.
*/
namespace file_handler {
/**
* @brief Get the parent directory of a file or directory.
* @param path The path of the file or directory.
* @return The parent directory.
* @examples
* std::string parent_dir = get_parent_directory("path/to/file");
* @examples_end
*/
std::string
get_parent_directory(const std::string &path);
/**
* @brief Make a directory.
* @param path The path of the directory.
* @return `true` on success, `false` on failure.
* @examples
* bool dir_created = make_directory("path/to/directory");
* @examples_end
*/
bool
make_directory(const std::string &path);
/**
* @brief Read a file to string.
* @param path The path of the file.
* @return The contents of the file.
* @examples
* std::string contents = read_file("path/to/file");
* @examples_end
*/
std::string
read_file(const char *path);
/**
* @brief Writes a file.
* @param path The path of the file.
* @param contents The contents to write.
* @return ``0`` on success, ``-1`` on failure.
* @examples
* int write_status = write_file("path/to/file", "file contents");
* @examples_end
*/
int
write_file(const char *path, const std::string_view &contents);
} // namespace file_handler
| 1,447
|
C++
|
.h
| 52
| 24.5
| 68
| 0.663309
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,050
|
cbs.h
|
LizardByte_Sunshine/src/cbs.h
|
/**
* @file src/cbs.h
* @brief Declarations for FFmpeg Coded Bitstream API.
*/
#pragma once
#include "utility.h"
struct AVPacket;
struct AVCodecContext;
namespace cbs {
struct nal_t {
util::buffer_t<std::uint8_t> _new;
util::buffer_t<std::uint8_t> old;
};
struct hevc_t {
nal_t vps;
nal_t sps;
};
struct h264_t {
nal_t sps;
};
hevc_t
make_sps_hevc(const AVCodecContext *ctx, const AVPacket *packet);
h264_t
make_sps_h264(const AVCodecContext *ctx, const AVPacket *packet);
/**
* @brief Validates the Sequence Parameter Set (SPS) of a given packet.
* @param packet The packet to validate.
* @param codec_id The ID of the codec used (either AV_CODEC_ID_H264 or AV_CODEC_ID_H265).
* @return True if the SPS->VUI is present in the active SPS of the packet, false otherwise.
*/
bool
validate_sps(const AVPacket *packet, int codec_id);
} // namespace cbs
| 922
|
C++
|
.h
| 33
| 24.757576
| 94
| 0.694665
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,051
|
rswrapper.h
|
LizardByte_Sunshine/src/rswrapper.h
|
/**
* @file src/rswrapper.h
* @brief Wrappers for nanors vectorization
* @details This is a drop-in replacement for nanors rs.h
*/
#pragma once
#include <stdint.h>
typedef struct _reed_solomon reed_solomon;
typedef reed_solomon *(*reed_solomon_new_t)(int data_shards, int parity_shards);
typedef void (*reed_solomon_release_t)(reed_solomon *rs);
typedef int (*reed_solomon_encode_t)(reed_solomon *rs, uint8_t **shards, int nr_shards, int bs);
typedef int (*reed_solomon_decode_t)(reed_solomon *rs, uint8_t **shards, uint8_t *marks, int nr_shards, int bs);
extern reed_solomon_new_t reed_solomon_new_fn;
extern reed_solomon_release_t reed_solomon_release_fn;
extern reed_solomon_encode_t reed_solomon_encode_fn;
extern reed_solomon_decode_t reed_solomon_decode_fn;
#define reed_solomon_new reed_solomon_new_fn
#define reed_solomon_release reed_solomon_release_fn
#define reed_solomon_encode reed_solomon_encode_fn
#define reed_solomon_decode reed_solomon_decode_fn
/**
* @brief This initializes the RS function pointers to the best vectorized version available.
* @details The streaming code will directly invoke these function pointers during encoding.
*/
void
reed_solomon_init(void);
| 1,199
|
C++
|
.h
| 26
| 44.615385
| 112
| 0.783205
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,052
|
rtsp.h
|
LizardByte_Sunshine/src/rtsp.h
|
/**
* @file src/rtsp.h
* @brief Declarations for RTSP streaming.
*/
#pragma once
#include <atomic>
#include "crypto.h"
#include "thread_safe.h"
namespace rtsp_stream {
constexpr auto RTSP_SETUP_PORT = 21;
struct launch_session_t {
uint32_t id;
crypto::aes_t gcm_key;
crypto::aes_t iv;
std::string av_ping_payload;
uint32_t control_connect_data;
bool host_audio;
std::string unique_id;
int width;
int height;
int fps;
int gcmap;
int appid;
int surround_info;
std::string surround_params;
bool enable_hdr;
bool enable_sops;
std::optional<crypto::cipher::gcm_t> rtsp_cipher;
std::string rtsp_url_scheme;
uint32_t rtsp_iv_counter;
};
void
launch_session_raise(std::shared_ptr<launch_session_t> launch_session);
/**
* @brief Clear state for the specified launch session.
* @param launch_session_id The ID of the session to clear.
*/
void
launch_session_clear(uint32_t launch_session_id);
/**
* @brief Get the number of active sessions.
* @return Count of active sessions.
*/
int
session_count();
/**
* @brief Terminates all running streaming sessions.
*/
void
terminate_sessions();
void
rtpThread();
} // namespace rtsp_stream
| 1,272
|
C++
|
.h
| 53
| 20.188679
| 73
| 0.685477
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,053
|
httpcommon.h
|
LizardByte_Sunshine/src/httpcommon.h
|
/**
* @file src/httpcommon.h
* @brief Declarations for common HTTP.
*/
#pragma once
#include "network.h"
#include "thread_safe.h"
namespace http {
int
init();
int
create_creds(const std::string &pkey, const std::string &cert);
int
save_user_creds(
const std::string &file,
const std::string &username,
const std::string &password,
bool run_our_mouth = false);
int
reload_user_creds(const std::string &file);
bool
download_file(const std::string &url, const std::string &file);
std::string
url_escape(const std::string &url);
std::string
url_get_host(const std::string &url);
extern std::string unique_id;
extern net::net_e origin_web_ui_allowed;
} // namespace http
| 724
|
C++
|
.h
| 29
| 22
| 65
| 0.702467
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,055
|
logging.h
|
LizardByte_Sunshine/src/logging.h
|
/**
* @file src/logging.h
* @brief Declarations for logging related functions.
*/
#pragma once
// lib includes
#include <boost/log/common.hpp>
#include <boost/log/sinks.hpp>
using text_sink = boost::log::sinks::asynchronous_sink<boost::log::sinks::text_ostream_backend>;
extern boost::log::sources::severity_logger<int> verbose;
extern boost::log::sources::severity_logger<int> debug;
extern boost::log::sources::severity_logger<int> info;
extern boost::log::sources::severity_logger<int> warning;
extern boost::log::sources::severity_logger<int> error;
extern boost::log::sources::severity_logger<int> fatal;
#ifdef SUNSHINE_TESTS
extern boost::log::sources::severity_logger<int> tests;
#endif
#include "config.h"
#include "stat_trackers.h"
/**
* @brief Handles the initialization and deinitialization of the logging system.
*/
namespace logging {
class deinit_t {
public:
/**
* @brief A destructor that restores the initial state.
*/
~deinit_t();
};
/**
* @brief Deinitialize the logging system.
* @examples
* deinit();
* @examples_end
*/
void
deinit();
void
formatter(const boost::log::record_view &view, boost::log::formatting_ostream &os);
/**
* @brief Initialize the logging system.
* @param min_log_level The minimum log level to output.
* @param log_file The log file to write to.
* @return An object that will deinitialize the logging system when it goes out of scope.
* @examples
* log_init(2, "sunshine.log");
* @examples_end
*/
[[nodiscard]] std::unique_ptr<deinit_t>
init(int min_log_level, const std::string &log_file);
/**
* @brief Setup AV logging.
* @param min_log_level The log level.
*/
void
setup_av_logging(int min_log_level);
/**
* @brief Flush the log.
* @examples
* log_flush();
* @examples_end
*/
void
log_flush();
/**
* @brief Print help to stdout.
* @param name The name of the program.
* @examples
* print_help("sunshine");
* @examples_end
*/
void
print_help(const char *name);
/**
* @brief A helper class for tracking and logging numerical values across a period of time
* @examples
* min_max_avg_periodic_logger<int> logger(debug, "Test time value", "ms", 5s);
* logger.collect_and_log(1);
* // ...
* logger.collect_and_log(2);
* // after 5 seconds
* logger.collect_and_log(3);
* // In the log:
* // [2024:01:01:12:00:00]: Debug: Test time value (min/max/avg): 1ms/3ms/2.00ms
* @examples_end
*/
template <typename T>
class min_max_avg_periodic_logger {
public:
min_max_avg_periodic_logger(boost::log::sources::severity_logger<int> &severity,
std::string_view message,
std::string_view units,
std::chrono::seconds interval_in_seconds = std::chrono::seconds(20)):
severity(severity),
message(message),
units(units),
interval(interval_in_seconds),
enabled(config::sunshine.min_log_level <= severity.default_severity()) {}
void
collect_and_log(const T &value) {
if (enabled) {
auto print_info = [&](const T &min_value, const T &max_value, double avg_value) {
auto f = stat_trackers::two_digits_after_decimal();
if constexpr (std::is_floating_point_v<T>) {
BOOST_LOG(severity.get()) << message << " (min/max/avg): " << f % min_value << units << "/" << f % max_value << units << "/" << f % avg_value << units;
}
else {
BOOST_LOG(severity.get()) << message << " (min/max/avg): " << min_value << units << "/" << max_value << units << "/" << f % avg_value << units;
}
};
tracker.collect_and_callback_on_interval(value, print_info, interval);
}
}
void
collect_and_log(std::function<T()> func) {
if (enabled) collect_and_log(func());
}
void
reset() {
if (enabled) tracker.reset();
}
bool
is_enabled() const {
return enabled;
}
private:
std::reference_wrapper<boost::log::sources::severity_logger<int>> severity;
std::string message;
std::string units;
std::chrono::seconds interval;
bool enabled;
stat_trackers::min_max_avg_tracker<T> tracker;
};
/**
* @brief A helper class for tracking and logging short time intervals across a period of time
* @examples
* time_delta_periodic_logger logger(debug, "Test duration", 5s);
* logger.first_point_now();
* // ...
* logger.second_point_now_and_log();
* // after 5 seconds
* logger.first_point_now();
* // ...
* logger.second_point_now_and_log();
* // In the log:
* // [2024:01:01:12:00:00]: Debug: Test duration (min/max/avg): 1.23ms/3.21ms/2.31ms
* @examples_end
*/
class time_delta_periodic_logger {
public:
time_delta_periodic_logger(boost::log::sources::severity_logger<int> &severity,
std::string_view message,
std::chrono::seconds interval_in_seconds = std::chrono::seconds(20)):
logger(severity, message, "ms", interval_in_seconds) {}
void
first_point(const std::chrono::steady_clock::time_point &point) {
if (logger.is_enabled()) point1 = point;
}
void
first_point_now() {
if (logger.is_enabled()) first_point(std::chrono::steady_clock::now());
}
void
second_point_and_log(const std::chrono::steady_clock::time_point &point) {
if (logger.is_enabled()) {
logger.collect_and_log(std::chrono::duration<double, std::milli>(point - point1).count());
}
}
void
second_point_now_and_log() {
if (logger.is_enabled()) second_point_and_log(std::chrono::steady_clock::now());
}
void
reset() {
if (logger.is_enabled()) logger.reset();
}
bool
is_enabled() const {
return logger.is_enabled();
}
private:
std::chrono::steady_clock::time_point point1 = std::chrono::steady_clock::now();
min_max_avg_periodic_logger<double> logger;
};
/**
* @brief Enclose string in square brackets.
* @param input Input string.
* @return Enclosed string.
*/
std::string
bracket(const std::string &input);
/**
* @brief Enclose string in square brackets.
* @param input Input string.
* @return Enclosed string.
*/
std::wstring
bracket(const std::wstring &input);
} // namespace logging
| 6,347
|
C++
|
.h
| 201
| 26.975124
| 163
| 0.642857
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,056
|
thread_safe.h
|
LizardByte_Sunshine/src/thread_safe.h
|
/**
* @file src/thread_safe.h
* @brief Declarations for thread-safe data structures.
*/
#pragma once
#include <array>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <map>
#include <mutex>
#include <vector>
#include "utility.h"
namespace safe {
/**
 * @brief Thread-safe single-slot event: producers raise a value, consumers wait for it.
 * @details `pop()` consumes the stored value while `view()` leaves it in place; per the
 *          inline notes below, the two families must not be mixed on one instance.
 *          `stop()` wakes every waiter and makes all subsequent waits return an empty status.
 */
template <class T>
class event_t {
public:
  // Optional-like wrapper around T (project utility type).
  using status_t = util::optional_t<T>;

  // Store a value constructed in place from `args` and wake all waiters.
  // No-op once stop() has been called; an unconsumed previous value is overwritten.
  template <class... Args>
  void
  raise(Args &&...args) {
    std::lock_guard lg { _lock };
    if (!_continue) {
      return;
    }
    if constexpr (std::is_same_v<std::optional<T>, status_t>) {
      _status = std::make_optional<T>(std::forward<Args>(args)...);
    }
    else {
      _status = status_t { std::forward<Args>(args)... };
    }
    _cv.notify_all();
  }

  // pop and view should not be used interchangeably
  // Block until a value is raised, then consume and return it.
  // Returns an empty status if stop() is called while waiting.
  status_t
  pop() {
    std::unique_lock ul { _lock };
    if (!_continue) {
      return util::false_v<status_t>;
    }
    while (!_status) {
      _cv.wait(ul);
      if (!_continue) {
        return util::false_v<status_t>;
      }
    }
    auto val = std::move(_status);
    _status = util::false_v<status_t>;
    return val;
  }

  // pop and view should not be used interchangeably
  // Timed variant of pop(): empty status on timeout or stop.
  // NOTE(review): the timeout restarts after every spurious wakeup, so the total
  // wait can exceed `delay` — confirm callers tolerate this.
  template <class Rep, class Period>
  status_t
  pop(std::chrono::duration<Rep, Period> delay) {
    std::unique_lock ul { _lock };
    if (!_continue) {
      return util::false_v<status_t>;
    }
    while (!_status) {
      if (!_continue || _cv.wait_for(ul, delay) == std::cv_status::timeout) {
        return util::false_v<status_t>;
      }
    }
    auto val = std::move(_status);
    _status = util::false_v<status_t>;
    return val;
  }

  // pop and view should not be used interchangeably
  // Block until a value is present and return a copy without consuming it.
  status_t
  view() {
    std::unique_lock ul { _lock };
    if (!_continue) {
      return util::false_v<status_t>;
    }
    while (!_status) {
      _cv.wait(ul);
      if (!_continue) {
        return util::false_v<status_t>;
      }
    }
    return _status;
  }

  // pop and view should not be used interchangeably
  // Timed variant of view(): empty status on timeout or stop.
  template <class Rep, class Period>
  status_t
  view(std::chrono::duration<Rep, Period> delay) {
    std::unique_lock ul { _lock };
    if (!_continue) {
      return util::false_v<status_t>;
    }
    while (!_status) {
      if (!_continue || _cv.wait_for(ul, delay) == std::cv_status::timeout) {
        return util::false_v<status_t>;
      }
    }
    return _status;
  }

  // Non-blocking check: running and a value is currently stored.
  // NOTE(review): reads _status without taking _lock — confirm callers accept a racy peek.
  bool
  peek() {
    return _continue && (bool) _status;
  }

  // Permanently shut the event down and wake all waiters (they return empty statuses).
  void
  stop() {
    std::lock_guard lg { _lock };
    _continue = false;
    _cv.notify_all();
  }

  // Re-arm a stopped event and discard any stored value.
  void
  reset() {
    std::lock_guard lg { _lock };
    _continue = true;
    _status = util::false_v<status_t>;
  }

  [[nodiscard]] bool
  running() const {
    return _continue;
  }

private:
  bool _continue { true };  // cleared by stop(); guarded by _lock (racily read by peek()/running())
  status_t _status { util::false_v<status_t> };  // the single slot; guarded by _lock

  std::condition_variable _cv;
  std::mutex _lock;
};
/**
 * @brief One-shot alarm: a waiter blocks until another thread "rings" it with a status.
 * @details `reset()` re-arms the alarm for reuse. Wrapped in a `shared_ptr` via
 *          `make_alarm()` so ringer and waiter can share ownership.
 */
template <class T>
class alarm_raw_t {
public:
  using status_t = util::optional_t<T>;

  // Deliver a status and wake one waiter.
  void
  ring(const status_t &status) {
    std::lock_guard lg(_lock);
    _status = status;
    _rang = true;
    _cv.notify_one();
  }

  // Move-in overload of ring().
  void
  ring(status_t &&status) {
    std::lock_guard lg(_lock);
    _status = std::move(status);
    _rang = true;
    _cv.notify_one();
  }

  // Wait up to rel_time for the alarm to ring; returns the predicate result (false on timeout).
  template <class Rep, class Period>
  auto
  wait_for(const std::chrono::duration<Rep, Period> &rel_time) {
    std::unique_lock ul(_lock);
    return _cv.wait_for(ul, rel_time, [this]() { return _rang; });
  }

  // As above, but also wakes when the caller-supplied predicate becomes true.
  template <class Rep, class Period, class Pred>
  auto
  wait_for(const std::chrono::duration<Rep, Period> &rel_time, Pred &&pred) {
    std::unique_lock ul(_lock);
    return _cv.wait_for(ul, rel_time, [this, &pred]() { return _rang || pred(); });
  }

  // NOTE(review): both wait_until overloads take a *duration* and forward it to
  // condition_variable::wait_until, which expects a time_point; they will not
  // compile if ever instantiated — confirm they are unused and consider fixing.
  template <class Rep, class Period>
  auto
  wait_until(const std::chrono::duration<Rep, Period> &rel_time) {
    std::unique_lock ul(_lock);
    return _cv.wait_until(ul, rel_time, [this]() { return _rang; });
  }

  template <class Rep, class Period, class Pred>
  auto
  wait_until(const std::chrono::duration<Rep, Period> &rel_time, Pred &&pred) {
    std::unique_lock ul(_lock);
    return _cv.wait_until(ul, rel_time, [this, &pred]() { return _rang || pred(); });
  }

  // Block indefinitely until the alarm rings.
  auto
  wait() {
    std::unique_lock ul(_lock);
    _cv.wait(ul, [this]() { return _rang; });
  }

  // Block until the alarm rings or the predicate becomes true.
  template <class Pred>
  auto
  wait(Pred &&pred) {
    std::unique_lock ul(_lock);
    _cv.wait(ul, [this, &pred]() { return _rang || pred(); });
  }

  // NOTE(review): the status() accessors are not synchronized; callers appear
  // expected to read only after a successful wait — confirm.
  const status_t &
  status() const {
    return _status;
  }

  status_t &
  status() {
    return _status;
  }

  // Re-arm the alarm: clear the status and the rang flag.
  void
  reset() {
    _status = status_t {};
    _rang = false;
  }

private:
  std::mutex _lock;
  std::condition_variable _cv;

  status_t _status { util::false_v<status_t> };  // delivered payload; valid once _rang is set
  bool _rang { false };  // latched by ring(); guarded by _lock inside the wait predicates
};
template <class T>
using alarm_t = std::shared_ptr<alarm_raw_t<T>>;
/// Factory: allocate a fresh alarm and hand it back as a shared pointer.
template <class T>
alarm_t<T>
make_alarm() {
  alarm_t<T> alarm { std::make_shared<alarm_raw_t<T>>() };
  return alarm;
}
/**
 * @brief Thread-safe bounded FIFO with blocking pop.
 * @details When the queue is full, `raise()` drops ALL pending elements before
 *          inserting the new one (newest data wins). `stop()` wakes waiters and
 *          makes subsequent pops return an empty status.
 */
template <class T>
class queue_t {
public:
  using status_t = util::optional_t<T>;

  // max_elements is the capacity at which raise() clears the backlog.
  queue_t(std::uint32_t max_elements = 32):
      _max_elements { max_elements } {}

  // Append an element constructed in place and wake all waiters.
  // No-op after stop(). Clears the whole queue first when full.
  template <class... Args>
  void
  raise(Args &&...args) {
    std::lock_guard ul { _lock };
    if (!_continue) {
      return;
    }
    if (_queue.size() == _max_elements) {
      _queue.clear();
    }
    _queue.emplace_back(std::forward<Args>(args)...);
    _cv.notify_all();
  }

  // Non-blocking check: running and at least one element queued.
  // NOTE(review): reads _queue without taking _lock — confirm callers accept a racy peek.
  bool
  peek() {
    return _continue && !_queue.empty();
  }

  // Timed pop: empty status on timeout or stop.
  // NOTE(review): the timeout restarts after every spurious wakeup, so the total
  // wait can exceed `delay`.
  template <class Rep, class Period>
  status_t
  pop(std::chrono::duration<Rep, Period> delay) {
    std::unique_lock ul { _lock };
    if (!_continue) {
      return util::false_v<status_t>;
    }
    while (_queue.empty()) {
      if (!_continue || _cv.wait_for(ul, delay) == std::cv_status::timeout) {
        return util::false_v<status_t>;
      }
    }
    auto val = std::move(_queue.front());
    _queue.erase(std::begin(_queue));
    return val;
  }

  // Blocking pop: waits until an element arrives or stop() is called.
  status_t
  pop() {
    std::unique_lock ul { _lock };
    if (!_continue) {
      return util::false_v<status_t>;
    }
    while (_queue.empty()) {
      _cv.wait(ul);
      if (!_continue) {
        return util::false_v<status_t>;
      }
    }
    auto val = std::move(_queue.front());
    _queue.erase(std::begin(_queue));
    return val;
  }

  // Direct access to the underlying storage WITHOUT locking — caller must
  // guarantee exclusive access.
  std::vector<T> &
  unsafe() {
    return _queue;
  }

  // Permanently shut the queue down and wake all waiters.
  void
  stop() {
    std::lock_guard lg { _lock };
    _continue = false;
    _cv.notify_all();
  }

  [[nodiscard]] bool
  running() const {
    return _continue;
  }

private:
  bool _continue { true };  // cleared by stop()
  std::uint32_t _max_elements;  // capacity threshold triggering the clear-on-full policy

  std::mutex _lock;
  std::condition_variable _cv;
  std::vector<T> _queue;  // FIFO storage; front is oldest
};
/**
 * @brief Lazily-constructed, reference-counted singleton wrapper.
 * @details The wrapped `element_type` lives in an internal buffer. It is
 *          constructed (via placement new + the user construct hook) when the
 *          first `ptr_t` is handed out by `ref()` and torn down (user destruct
 *          hook + destructor) when the last `ptr_t` is released.
 */
template <class T>
class shared_t {
public:
  using element_type = T;

  using construct_f = std::function<int(element_type &)>;  // returns non-zero on failure
  using destruct_f = std::function<void(element_type &)>;

  /**
   * @brief Counted handle to the shared object; each live `ptr_t` holds one reference.
   */
  struct ptr_t {
    shared_t *owner;

    ptr_t():
        owner { nullptr } {}

    explicit ptr_t(shared_t *owner):
        owner { owner } {}

    ptr_t(ptr_t &&ptr) noexcept:
        owner { ptr.owner } {
      ptr.owner = nullptr;
    }

    ptr_t(const ptr_t &ptr) noexcept:
        owner { ptr.owner } {
      if (!owner) {
        return;
      }

      // Bump the reference count, then detach the temporary so only *this
      // carries the increment.
      auto tmp = ptr.owner->ref();
      tmp.owner = nullptr;
    }

    ptr_t &
    operator=(const ptr_t &ptr) noexcept {
      if (!ptr.owner) {
        release();
        return *this;
      }

      // Acquire a fresh counted reference and move-assign it into *this.
      // (The previous `*ptr.owner->ref()` dereferenced a ptr_t, which has no
      // operator* and would not compile if this member were instantiated.)
      return *this = ptr.owner->ref();
    }

    ptr_t &
    operator=(ptr_t &&ptr) noexcept {
      if (owner) {
        release();
      }

      std::swap(owner, ptr.owner);
      return *this;
    }

    ~ptr_t() {
      if (owner) {
        release();
      }
    }

    operator bool() const {
      return owner != nullptr;
    }

    // Drop this reference; destroys the shared object when the count reaches zero.
    void
    release() {
      std::lock_guard lg { owner->_lock };

      if (!--owner->_count) {
        owner->_destruct(*get());
        (*this)->~element_type();
      }

      owner = nullptr;
    }

    element_type *
    get() const {
      return reinterpret_cast<element_type *>(owner->_object_buf.data());
    }

    element_type *
    operator->() {
      return reinterpret_cast<element_type *>(owner->_object_buf.data());
    }
  };

  template <class FC, class FD>
  shared_t(FC &&fc, FD &&fd):
      _construct { std::forward<FC>(fc) }, _destruct { std::forward<FD>(fd) } {}

  /**
   * @brief Obtain a counted reference, constructing the object on first use.
   * @return A valid `ptr_t`, or an empty one if the construct hook failed.
   */
  [[nodiscard]] ptr_t
  ref() {
    std::lock_guard lg { _lock };

    if (!_count) {
      new (_object_buf.data()) element_type;
      if (_construct(*reinterpret_cast<element_type *>(_object_buf.data()))) {
        // Construction hook failed: destroy the placement-new'd object again
        // (previously leaked its destructor call) and hand out nothing.
        reinterpret_cast<element_type *>(_object_buf.data())->~element_type();
        return ptr_t { nullptr };
      }
    }

    ++_count;
    return ptr_t { this };
  }

private:
  construct_f _construct;
  destruct_f _destruct;

  // alignas ensures the placement new above yields a correctly aligned object.
  alignas(element_type) std::array<std::uint8_t, sizeof(element_type)> _object_buf;

  // Must start at zero: ref() reads it to decide whether construction is
  // needed (was previously uninitialized — undefined behavior on first ref()).
  std::uint32_t _count {};
  std::mutex _lock;
};
template <class T, class F_Construct, class F_Destruct>
auto
make_shared(F_Construct &&fc, F_Destruct &&fd) {
return shared_t<T> {
std::forward<F_Construct>(fc), std::forward<F_Destruct>(fd)
};
}
using signal_t = event_t<bool>;
class mail_raw_t;
using mail_t = std::shared_ptr<mail_raw_t>;
void
cleanup(mail_raw_t *);
/**
 * @brief A mailbox slot: the payload type T (an event or queue) bundled with the
 *        mailbox it is registered in.
 * @details On destruction the owning mailbox is asked to prune the now-expired entry.
 */
template <class T>
class post_t: public T {
public:
  template <class... Args>
  post_t(mail_t mail, Args &&...args):
      T(std::forward<Args>(args)...), mail { std::move(mail) } {}

  mail_t mail;  // keeps the mailbox alive as long as any post is alive

  ~post_t() {
    // Let the mailbox drop the expired weak_ptr for this slot.
    cleanup(mail.get());
  }
};
/// Recover a typed post pointer from the type-erased weak reference stored in the
/// mailbox. Returns an empty pointer when the referent is gone.
template <class T>
inline auto
lock(const std::weak_ptr<void> &wp) {
  using element_t = typename T::element_type;
  auto locked = wp.lock();
  return std::reinterpret_pointer_cast<element_t>(locked);
}
/**
 * @brief Mailbox: maps string ids to type-erased posts (events or queues).
 * @details Posts are stored as weak pointers so the mailbox does not keep them
 *          alive; each destroyed post triggers `cleanup()` (via `post_t`'s
 *          destructor), which prunes exactly one expired entry.
 */
class mail_raw_t: public std::enable_shared_from_this<mail_raw_t> {
public:
  template <class T>
  using event_t = std::shared_ptr<post_t<event_t<T>>>;

  template <class T>
  using queue_t = std::shared_ptr<post_t<queue_t<T>>>;

  // Look up (or create) the event registered under `id`.
  // NOTE(review): if the stored weak_ptr has expired but its cleanup() has not run
  // yet, this returns an empty shared_ptr rather than a fresh post — confirm
  // callers tolerate this window.
  template <class T>
  event_t<T>
  event(const std::string_view &id) {
    std::lock_guard lg { mutex };

    auto it = id_to_post.find(id);
    if (it != std::end(id_to_post)) {
      return lock<event_t<T>>(it->second);
    }

    auto post = std::make_shared<typename event_t<T>::element_type>(shared_from_this());
    id_to_post.emplace(std::pair<std::string, std::weak_ptr<void>> { std::string { id }, post });

    return post;
  }

  // Look up (or create, with capacity 32) the queue registered under `id`.
  template <class T>
  queue_t<T>
  queue(const std::string_view &id) {
    std::lock_guard lg { mutex };

    auto it = id_to_post.find(id);
    if (it != std::end(id_to_post)) {
      return lock<queue_t<T>>(it->second);
    }

    auto post = std::make_shared<typename queue_t<T>::element_type>(shared_from_this(), 32);
    id_to_post.emplace(std::pair<std::string, std::weak_ptr<void>> { std::string { id }, post });

    return post;
  }

  // Erase the first expired entry found, then stop. Called once per destroyed
  // post, so a single erase per call keeps the map balanced.
  void
  cleanup() {
    std::lock_guard lg { mutex };

    for (auto it = std::begin(id_to_post); it != std::end(id_to_post); ++it) {
      auto &weak = it->second;

      if (weak.expired()) {
        id_to_post.erase(it);
        return;
      }
    }
  }

  std::mutex mutex;  // guards id_to_post
  // std::less<> enables heterogeneous (string_view) lookup without a temporary string.
  std::map<std::string, std::weak_ptr<void>, std::less<>> id_to_post;
};
// Out-of-line hook used by post_t's destructor (declared above, before
// mail_raw_t is complete): forwards to the mailbox's cleanup().
inline void
cleanup(mail_raw_t *mail) {
  mail->cleanup();
}
} // namespace safe
| 11,977
|
C++
|
.h
| 443
| 20.625282
| 99
| 0.549649
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,058
|
input.h
|
LizardByte_Sunshine/src/input.h
|
/**
* @file src/input.h
* @brief Declarations for gamepad, keyboard, and mouse input handling.
*/
#pragma once
#include <functional>
#include "platform/common.h"
#include "thread_safe.h"
namespace input {
struct input_t;
void
print(void *input);
void
reset(std::shared_ptr<input_t> &input);
void
passthrough(std::shared_ptr<input_t> &input, std::vector<std::uint8_t> &&input_data);
[[nodiscard]] std::unique_ptr<platf::deinit_t>
init();
bool
probe_gamepads();
std::shared_ptr<input_t>
alloc(safe::mail_t mail);
struct touch_port_t: public platf::touch_port_t {
  // Dimensions of the desktop environment the touch coordinates map into.
  int env_width, env_height;

  // Offset x and y coordinates of the client
  float client_offsetX, client_offsetY;

  // presumably the inverse of the client-to-host scaling factor — confirm with callers
  float scalar_inv;

  // A touch port is usable only once both its own and the environment
  // dimensions are known (non-zero). width/height come from the base struct.
  explicit
  operator bool() const {
    return width != 0 && height != 0 && env_width != 0 && env_height != 0;
  }
};
/**
* @brief Scale the ellipse axes according to the provided size.
* @param val The major and minor axis pair.
* @param rotation The rotation value from the touch/pen event.
* @param scalar The scalar cartesian coordinate pair.
* @return The major and minor axis pair.
*/
std::pair<float, float>
scale_client_contact_area(const std::pair<float, float> &val, uint16_t rotation, const std::pair<float, float> &scalar);
} // namespace input
| 1,340
|
C++
|
.h
| 42
| 28.452381
| 122
| 0.692068
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,059
|
network.h
|
LizardByte_Sunshine/src/network.h
|
/**
* @file src/network.h
* @brief Declarations for networking related functions.
*/
#pragma once
#include <tuple>
#include <utility>
#include <boost/asio.hpp>
#include <enet/enet.h>
#include "utility.h"
namespace net {
void
free_host(ENetHost *host);
/**
* @brief Map a specified port based on the base port.
* @param port The port to map as a difference from the base port.
* @return The mapped port number.
* @examples
* std::uint16_t mapped_port = net::map_port(1);
* @examples_end
* @todo Ensure port is not already in use by another application.
*/
std::uint16_t
map_port(int port);
using host_t = util::safe_ptr<ENetHost, free_host>;
using peer_t = ENetPeer *;
using packet_t = util::safe_ptr<ENetPacket, enet_packet_destroy>;
enum net_e : int {
PC, ///< PC
LAN, ///< LAN
WAN ///< WAN
};
enum af_e : int {
IPV4, ///< IPv4 only
BOTH ///< IPv4 and IPv6
};
net_e
from_enum_string(const std::string_view &view);
std::string_view
to_enum_string(net_e net);
net_e
from_address(const std::string_view &view);
host_t
host_create(af_e af, ENetAddress &addr, std::uint16_t port);
/**
* @brief Get the address family enum value from a string.
* @param view The config option value.
* @return The address family enum value.
*/
af_e
af_from_enum_string(const std::string_view &view);
/**
* @brief Get the wildcard binding address for a given address family.
* @param af Address family.
* @return Normalized address.
*/
std::string_view
af_to_any_address_string(af_e af);
/**
* @brief Convert an address to a normalized form.
* @details Normalization converts IPv4-mapped IPv6 addresses into IPv4 addresses.
* @param address The address to normalize.
* @return Normalized address.
*/
boost::asio::ip::address
normalize_address(boost::asio::ip::address address);
/**
* @brief Get the given address in normalized string form.
* @details Normalization converts IPv4-mapped IPv6 addresses into IPv4 addresses.
* @param address The address to normalize.
* @return Normalized address in string form.
*/
std::string
addr_to_normalized_string(boost::asio::ip::address address);
/**
* @brief Get the given address in a normalized form for the host portion of a URL.
* @details Normalization converts IPv4-mapped IPv6 addresses into IPv4 addresses.
* @param address The address to normalize and escape.
* @return Normalized address in URL-escaped string.
*/
std::string
addr_to_url_escaped_string(boost::asio::ip::address address);
/**
* @brief Get the encryption mode for the given remote endpoint address.
* @param address The address used to look up the desired encryption mode.
* @return The WAN or LAN encryption mode, based on the provided address.
*/
int
encryption_mode_for_address(boost::asio::ip::address address);
/**
* @brief Returns a string for use as the instance name for mDNS.
* @param hostname The hostname to use for instance name generation.
* @return Hostname-based instance name or "Sunshine" if hostname is invalid.
*/
std::string
mdns_instance_name(const std::string_view &hostname);
} // namespace net
| 3,257
|
C++
|
.h
| 97
| 30.092784
| 85
| 0.702642
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,060
|
task_pool.h
|
LizardByte_Sunshine/src/task_pool.h
|
/**
* @file src/task_pool.h
* @brief Declarations for the task pool system.
*/
#pragma once
#include <chrono>
#include <deque>
#include <functional>
#include <future>
#include <mutex>
#include <optional>
#include <type_traits>
#include <utility>
#include <vector>
#include "move_by_copy.h"
#include "utility.h"
namespace task_pool_util {

  /**
   * @brief Type-erased runnable task interface.
   */
  class _ImplBase {
  public:
    // _unique_base_type _this_ptr;

    inline virtual ~_ImplBase() = default;

    virtual void
    run() = 0;
  };

  /**
   * @brief Concrete runnable wrapping an arbitrary callable.
   */
  template <class Function>
  class _Impl: public _ImplBase {
    Function _func;

  public:
    _Impl(Function &&f):
        _func(std::forward<Function>(f)) {}

    void
    run() override {
      _func();
    }
  };

  /**
   * @brief Mutex-guarded pool of immediate and timed tasks.
   * @details Immediate tasks are FIFO (`_tasks`). Timed tasks are kept sorted by
   *          DESCENDING time point (`_timer_tasks`), so the soonest-due task sits
   *          at the back and is popped in O(1). The pool only stores tasks; callers
   *          drive execution via `pop()`/`run()`.
   */
  class TaskPool {
  public:
    typedef std::unique_ptr<_ImplBase> __task;
    typedef _ImplBase *task_id_t;  // stable identity: address of the type-erased task

    typedef std::chrono::steady_clock::time_point __time_point;

    // Handle for a delayed task: its id (for delay()/cancel()) plus the result future.
    template <class R>
    class timer_task_t {
    public:
      task_id_t task_id;
      std::future<R> future;

      timer_task_t(task_id_t task_id, std::future<R> &future):
          task_id { task_id }, future { std::move(future) } {}
    };

  protected:
    std::deque<__task> _tasks;
    std::vector<std::pair<__time_point, __task>> _timer_tasks;  // sorted by descending time
    std::mutex _task_mutex;

  public:
    TaskPool() = default;

    TaskPool(TaskPool &&other) noexcept:
        _tasks { std::move(other._tasks) }, _timer_tasks { std::move(other._timer_tasks) } {}

    TaskPool &
    operator=(TaskPool &&other) noexcept {
      std::swap(_tasks, other._tasks);
      std::swap(_timer_tasks, other._timer_tasks);
      return *this;
    }

    /**
     * @brief Queue a task for immediate execution.
     * @return A future for the task's result.
     */
    template <class Function, class... Args>
    auto
    push(Function &&newTask, Args &&...args) {
      static_assert(std::is_invocable_v<Function, Args &&...>, "arguments don't match the function");

      using __return = std::invoke_result_t<Function, Args &&...>;
      using task_t = std::packaged_task<__return()>;

      // Bind the arguments now so the stored task is a nullary callable.
      auto bind = [task = std::forward<Function>(newTask), tuple_args = std::make_tuple(std::forward<Args>(args)...)]() mutable {
        return std::apply(task, std::move(tuple_args));
      };

      task_t task(std::move(bind));
      auto future = task.get_future();

      std::lock_guard<std::mutex> lg(_task_mutex);
      _tasks.emplace_back(toRunnable(std::move(task)));

      return future;
    }

    /**
     * @brief Insert a timed task, preserving the descending-time ordering.
     */
    void
    pushDelayed(std::pair<__time_point, __task> &&task) {
      std::lock_guard lg(_task_mutex);

      auto it = _timer_tasks.cbegin();
      for (; it < _timer_tasks.cend(); ++it) {
        if (std::get<0>(*it) < task.first) {
          break;
        }
      }

      _timer_tasks.emplace(it, task.first, std::move(task.second));
    }

    /**
     * @brief Queue a task to run after `duration`.
     * @return An id to potentially delay the task, paired with the result future.
     */
    template <class Function, class X, class Y, class... Args>
    auto
    pushDelayed(Function &&newTask, std::chrono::duration<X, Y> duration, Args &&...args) {
      static_assert(std::is_invocable_v<Function, Args &&...>, "arguments don't match the function");

      using __return = std::invoke_result_t<Function, Args &&...>;
      using task_t = std::packaged_task<__return()>;

      __time_point time_point;
      if constexpr (std::is_floating_point_v<X>) {
        // Floating-point durations cannot be added to a time_point directly.
        time_point = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::nanoseconds>(duration);
      }
      else {
        time_point = std::chrono::steady_clock::now() + duration;
      }

      auto bind = [task = std::forward<Function>(newTask), tuple_args = std::make_tuple(std::forward<Args>(args)...)]() mutable {
        return std::apply(task, std::move(tuple_args));
      };

      task_t task(std::move(bind));
      auto future = task.get_future();

      auto runnable = toRunnable(std::move(task));
      task_id_t task_id = &*runnable;

      pushDelayed(std::pair { time_point, std::move(runnable) });

      return timer_task_t<__return> { task_id, future };
    }

    /**
     * @brief Reschedule a pending timed task relative to now.
     * @param task_id The id of the task to delay.
     * @param duration The delay before executing the task.
     */
    template <class X, class Y>
    void
    delay(task_id_t task_id, std::chrono::duration<X, Y> duration) {
      std::lock_guard<std::mutex> lg(_task_mutex);

      auto it = _timer_tasks.begin();
      for (; it < _timer_tasks.cend(); ++it) {
        const __task &task = std::get<1>(*it);
        if (&*task == task_id) {
          std::get<0>(*it) = std::chrono::steady_clock::now() + duration;
          break;
        }
      }

      if (it == _timer_tasks.cend()) {
        return;
      }

      // Restore descending order: bubble the updated entry toward the front.
      // Computing `it - 1` only once inside the loop avoids forming an iterator
      // before begin() when the entry is already at the front (previous UB).
      while (it != _timer_tasks.begin()) {
        auto prev = it - 1;
        if (std::get<0>(*it) > std::get<0>(*prev)) {
          std::swap(*it, *prev);
        }
        --it;
      }
    }

    /**
     * @brief Cancel a pending timed task.
     * @return `true` if the task was found and removed.
     */
    bool
    cancel(task_id_t task_id) {
      std::lock_guard lg(_task_mutex);

      auto it = _timer_tasks.begin();
      for (; it < _timer_tasks.cend(); ++it) {
        const __task &task = std::get<1>(*it);
        if (&*task == task_id) {
          _timer_tasks.erase(it);
          return true;
        }
      }

      return false;
    }

    // Remove and return a specific timed task (with its due time), if present.
    std::optional<std::pair<__time_point, __task>>
    pop(task_id_t task_id) {
      std::lock_guard lg(_task_mutex);

      auto pos = std::find_if(std::begin(_timer_tasks), std::end(_timer_tasks), [&task_id](const auto &t) { return t.second.get() == task_id; });
      if (pos == std::end(_timer_tasks)) {
        return std::nullopt;
      }

      // Erase the entry after moving it out; previously the moved-from husk
      // (null __task) was left behind and could later be handed to callers.
      auto task = std::move(*pos);
      _timer_tasks.erase(pos);
      return task;
    }

    // Return the next runnable task: immediate tasks first, then any due timed task.
    std::optional<__task>
    pop() {
      std::lock_guard lg(_task_mutex);

      if (!_tasks.empty()) {
        __task task = std::move(_tasks.front());
        _tasks.pop_front();
        return task;
      }

      if (!_timer_tasks.empty() && std::get<0>(_timer_tasks.back()) <= std::chrono::steady_clock::now()) {
        __task task = std::move(std::get<1>(_timer_tasks.back()));
        _timer_tasks.pop_back();
        return task;
      }

      return std::nullopt;
    }

    // True when pop() would yield a task right now.
    bool
    ready() {
      std::lock_guard<std::mutex> lg(_task_mutex);

      return !_tasks.empty() || (!_timer_tasks.empty() && std::get<0>(_timer_tasks.back()) <= std::chrono::steady_clock::now());
    }

    // Due time of the soonest timed task, if any.
    std::optional<__time_point>
    next() {
      std::lock_guard<std::mutex> lg(_task_mutex);

      if (_timer_tasks.empty()) {
        return std::nullopt;
      }

      return std::get<0>(_timer_tasks.back());
    }

  private:
    // Erase the callable's concrete type behind the _ImplBase interface.
    template <class Function>
    std::unique_ptr<_ImplBase>
    toRunnable(Function &&f) {
      return std::make_unique<_Impl<Function>>(std::forward<Function &&>(f));
    }
  };
}  // namespace task_pool_util
| 6,711
|
C++
|
.h
| 201
| 27.079602
| 145
| 0.579625
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,061
|
config.h
|
LizardByte_Sunshine/src/config.h
|
/**
* @file src/config.h
* @brief Declarations for the configuration of Sunshine.
*/
#pragma once
#include <bitset>
#include <chrono>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>
#include "nvenc/nvenc_config.h"
namespace config {
struct video_t {
// ffmpeg params
int qp; // higher == more compression and less quality
int hevc_mode;
int av1_mode;
int min_fps_factor; // Minimum fps target, determines minimum frame time
int min_threads; // Minimum number of threads/slices for CPU encoding
struct {
std::string sw_preset;
std::string sw_tune;
std::optional<int> svtav1_preset;
} sw;
nvenc::nvenc_config nv;
bool nv_realtime_hags;
bool nv_opengl_vulkan_on_dxgi;
bool nv_sunshine_high_power_mode;
struct {
int preset;
int multipass;
int h264_coder;
int aq;
int vbv_percentage_increase;
} nv_legacy;
struct {
std::optional<int> qsv_preset;
std::optional<int> qsv_cavlc;
bool qsv_slow_hevc;
} qsv;
struct {
std::optional<int> amd_usage_h264;
std::optional<int> amd_usage_hevc;
std::optional<int> amd_usage_av1;
std::optional<int> amd_rc_h264;
std::optional<int> amd_rc_hevc;
std::optional<int> amd_rc_av1;
std::optional<int> amd_enforce_hrd;
std::optional<int> amd_quality_h264;
std::optional<int> amd_quality_hevc;
std::optional<int> amd_quality_av1;
std::optional<int> amd_preanalysis;
std::optional<int> amd_vbaq;
int amd_coder;
} amd;
struct {
int vt_allow_sw;
int vt_require_sw;
int vt_realtime;
int vt_coder;
} vt;
struct {
bool strict_rc_buffer;
} vaapi;
std::string capture;
std::string encoder;
std::string adapter_name;
std::string output_name;
};
struct audio_t {
std::string sink;
std::string virtual_sink;
bool install_steam_drivers;
};
constexpr int ENCRYPTION_MODE_NEVER = 0; // Never use video encryption, even if the client supports it
constexpr int ENCRYPTION_MODE_OPPORTUNISTIC = 1; // Use video encryption if available, but stream without it if not supported
constexpr int ENCRYPTION_MODE_MANDATORY = 2; // Always use video encryption and refuse clients that can't encrypt
struct stream_t {
std::chrono::milliseconds ping_timeout;
std::string file_apps;
int fec_percentage;
// Video encryption settings for LAN and WAN streams
int lan_encryption_mode;
int wan_encryption_mode;
};
struct nvhttp_t {
// Could be any of the following values:
// pc|lan|wan
std::string origin_web_ui_allowed;
std::string pkey;
std::string cert;
std::string sunshine_name;
std::string file_state;
std::string external_ip;
};
struct input_t {
std::unordered_map<int, int> keybindings;
std::chrono::milliseconds back_button_timeout;
std::chrono::milliseconds key_repeat_delay;
std::chrono::duration<double> key_repeat_period;
std::string gamepad;
bool ds4_back_as_touchpad_click;
bool motion_as_ds4;
bool touchpad_as_ds4;
bool keyboard;
bool mouse;
bool controller;
bool always_send_scancodes;
bool high_resolution_scrolling;
bool native_pen_touch;
};
namespace flag {
enum flag_e : std::size_t {
PIN_STDIN = 0, ///< Read PIN from stdin instead of http
FRESH_STATE, ///< Do not load or save state
FORCE_VIDEO_HEADER_REPLACE, ///< force replacing headers inside video data
UPNP, ///< Try Universal Plug 'n Play
CONST_PIN, ///< Use "universal" pin
FLAG_SIZE ///< Number of flags
};
}
// A do/undo command pair with an elevation flag.
// NOTE(review): `bool &&` gains nothing over plain `bool`; kept as-is for API stability.
struct prep_cmd_t {
  prep_cmd_t(std::string &&do_cmd, std::string &&undo_cmd, bool &&elevated):
      do_cmd(std::move(do_cmd)), undo_cmd(std::move(undo_cmd)), elevated(std::move(elevated)) {}

  // Command pair without an undo step.
  explicit prep_cmd_t(std::string &&do_cmd, bool &&elevated):
      do_cmd(std::move(do_cmd)), elevated(std::move(elevated)) {}

  std::string do_cmd;  // presumably executed when the session starts — confirm with callers
  std::string undo_cmd;  // presumably executed on teardown; may be empty (second ctor)
  bool elevated;  // run the command with elevated privileges
};
struct sunshine_t {
std::string locale;
int min_log_level;
std::bitset<flag::FLAG_SIZE> flags;
std::string credentials_file;
std::string username;
std::string password;
std::string salt;
std::string config_file;
struct cmd_t {
std::string name;
int argc;
char **argv;
} cmd;
std::uint16_t port;
std::string address_family;
std::string log_file;
bool notify_pre_releases;
std::vector<prep_cmd_t> prep_cmds;
};
extern video_t video;
extern audio_t audio;
extern stream_t stream;
extern nvhttp_t nvhttp;
extern input_t input;
extern sunshine_t sunshine;
int
parse(int argc, char *argv[]);
std::unordered_map<std::string, std::string>
parse_config(const std::string_view &file_content);
} // namespace config
| 4,954
|
C++
|
.h
| 162
| 25.512346
| 128
| 0.665124
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,062
|
nvenc_d3d11_on_cuda.h
|
LizardByte_Sunshine/src/nvenc/nvenc_d3d11_on_cuda.h
|
/**
* @file src/nvenc/nvenc_d3d11_on_cuda.h
* @brief Declarations for CUDA NVENC encoder with Direct3D11 input surfaces.
*/
#pragma once
#ifdef _WIN32
#include "nvenc_d3d11.h"
#include <ffnvcodec/dynlink_cuda.h>
namespace nvenc {
/**
* @brief Interop Direct3D11 on CUDA NVENC encoder.
* Input surface is Direct3D11, encoding is performed by CUDA.
*/
class nvenc_d3d11_on_cuda final: public nvenc_d3d11 {
public:
/**
* @param d3d_device Direct3D11 device that will create input surface texture.
* CUDA encoding device will be derived from it.
*/
explicit nvenc_d3d11_on_cuda(ID3D11Device *d3d_device);
~nvenc_d3d11_on_cuda();
ID3D11Texture2D *
get_input_texture() override;
private:
bool
init_library() override;
bool
create_and_register_input_buffer() override;
bool
synchronize_input_buffer() override;
bool
cuda_succeeded(CUresult result);
bool
cuda_failed(CUresult result);
struct autopop_context {
autopop_context(nvenc_d3d11_on_cuda &parent, CUcontext pushed_context):
parent(parent),
pushed_context(pushed_context) {
}
~autopop_context();
explicit
operator bool() const {
return pushed_context != nullptr;
}
nvenc_d3d11_on_cuda &parent;
CUcontext pushed_context = nullptr;
};
autopop_context
push_context();
HMODULE dll = NULL;
const ID3D11DevicePtr d3d_device;
ID3D11Texture2DPtr d3d_input_texture;
struct {
tcuInit *cuInit;
tcuD3D11GetDevice *cuD3D11GetDevice;
tcuCtxCreate_v2 *cuCtxCreate;
tcuCtxDestroy_v2 *cuCtxDestroy;
tcuCtxPushCurrent_v2 *cuCtxPushCurrent;
tcuCtxPopCurrent_v2 *cuCtxPopCurrent;
tcuMemAllocPitch_v2 *cuMemAllocPitch;
tcuMemFree_v2 *cuMemFree;
tcuGraphicsD3D11RegisterResource *cuGraphicsD3D11RegisterResource;
tcuGraphicsUnregisterResource *cuGraphicsUnregisterResource;
tcuGraphicsMapResources *cuGraphicsMapResources;
tcuGraphicsUnmapResources *cuGraphicsUnmapResources;
tcuGraphicsSubResourceGetMappedArray *cuGraphicsSubResourceGetMappedArray;
tcuMemcpy2D_v2 *cuMemcpy2D;
HMODULE dll;
} cuda_functions = {};
CUresult last_cuda_error = CUDA_SUCCESS;
CUcontext cuda_context = nullptr;
CUgraphicsResource cuda_d3d_input_texture = nullptr;
CUdeviceptr cuda_surface = 0;
size_t cuda_surface_pitch = 0;
};
} // namespace nvenc
#endif
| 2,626
|
C++
|
.h
| 77
| 27.415584
| 83
| 0.688172
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,063
|
nvenc_d3d11.h
|
LizardByte_Sunshine/src/nvenc/nvenc_d3d11.h
|
/**
* @file src/nvenc/nvenc_d3d11.h
* @brief Declarations for abstract Direct3D11 NVENC encoder.
*/
#pragma once
#ifdef _WIN32
#include <comdef.h>
#include <d3d11.h>
#include "nvenc_base.h"
namespace nvenc {
_COM_SMARTPTR_TYPEDEF(ID3D11Device, IID_ID3D11Device);
_COM_SMARTPTR_TYPEDEF(ID3D11Texture2D, IID_ID3D11Texture2D);
_COM_SMARTPTR_TYPEDEF(IDXGIDevice, IID_IDXGIDevice);
_COM_SMARTPTR_TYPEDEF(IDXGIAdapter, IID_IDXGIAdapter);
/**
* @brief Abstract Direct3D11 NVENC encoder.
* Encapsulates common code used by native and interop implementations.
*/
class nvenc_d3d11: public nvenc_base {
public:
  explicit nvenc_d3d11(NV_ENC_DEVICE_TYPE device_type):
      nvenc_base(device_type) {}
  ~nvenc_d3d11();

  /**
   * @brief Get input surface texture.
   * @return Input surface texture.
   */
  virtual ID3D11Texture2D *
  get_input_texture() = 0;

protected:
  // Overrides nvenc_base::init_library(); implementation lives in the .cpp.
  // NOTE(review): presumably loads the NVENC DLL into `dll` — confirm against nvenc_d3d11.cpp.
  bool
  init_library() override;

private:
  // Module handle for the loaded library; NOTE(review): presumably freed in the
  // destructor declared above — confirm.
  HMODULE dll = NULL;
};
} // namespace nvenc
#endif
| 1,084
|
C++
|
.h
| 37
| 24.243243
| 81
| 0.680623
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,064
|
nvenc_utils.h
|
LizardByte_Sunshine/src/nvenc/nvenc_utils.h
|
/**
* @file src/nvenc/nvenc_utils.h
* @brief Declarations for NVENC utilities.
*/
#pragma once
#ifdef _WIN32
#include <dxgiformat.h>
#endif
#include "nvenc_colorspace.h"
#include "src/platform/common.h"
#include "src/video_colorspace.h"
#include <ffnvcodec/nvEncodeAPI.h>
namespace nvenc {
#ifdef _WIN32
DXGI_FORMAT
dxgi_format_from_nvenc_format(NV_ENC_BUFFER_FORMAT format);
#endif
NV_ENC_BUFFER_FORMAT
nvenc_format_from_sunshine_format(platf::pix_fmt_e format);
nvenc_colorspace_t
nvenc_colorspace_from_sunshine_colorspace(const video::sunshine_colorspace_t &sunshine_colorspace);
} // namespace nvenc
| 662
|
C++
|
.h
| 22
| 26.5
| 102
| 0.750804
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,065
|
nvenc_base.h
|
LizardByte_Sunshine/src/nvenc/nvenc_base.h
|
/**
* @file src/nvenc/nvenc_base.h
* @brief Declarations for abstract platform-agnostic base of standalone NVENC encoder.
*/
#pragma once
#include "nvenc_colorspace.h"
#include "nvenc_config.h"
#include "nvenc_encoded_frame.h"
#include "src/logging.h"
#include "src/video.h"
#include <ffnvcodec/nvEncodeAPI.h>
/**
* @brief Standalone NVENC encoder
*/
namespace nvenc {
/**
* @brief Abstract platform-agnostic base of standalone NVENC encoder.
* Derived classes perform platform-specific operations.
*/
class nvenc_base {
public:
/**
* @param device_type Underlying device type used by derived class.
*/
explicit nvenc_base(NV_ENC_DEVICE_TYPE device_type);
virtual ~nvenc_base();
nvenc_base(const nvenc_base &) = delete;
nvenc_base &
operator=(const nvenc_base &) = delete;
/**
* @brief Create the encoder.
* @param config NVENC encoder configuration.
* @param client_config Stream configuration requested by the client.
* @param colorspace YUV colorspace.
* @param buffer_format Platform-agnostic input surface format.
* @return `true` on success, `false` on error
*/
bool
create_encoder(const nvenc_config &config, const video::config_t &client_config, const nvenc_colorspace_t &colorspace, NV_ENC_BUFFER_FORMAT buffer_format);
/**
* @brief Destroy the encoder.
* Derived classes classes call it in the destructor.
*/
void
destroy_encoder();
/**
* @brief Encode the next frame using platform-specific input surface.
* @param frame_index Frame index that uniquely identifies the frame.
* Afterwards serves as parameter for `invalidate_ref_frames()`.
* No restrictions on the first frame index, but later frame indexes must be subsequent.
* @param force_idr Whether to encode frame as forced IDR.
* @return Encoded frame.
*/
nvenc_encoded_frame
encode_frame(uint64_t frame_index, bool force_idr);
/**
* @brief Perform reference frame invalidation (RFI) procedure.
* @param first_frame First frame index of the invalidation range.
* @param last_frame Last frame index of the invalidation range.
* @return `true` on success, `false` on error.
* After error next frame must be encoded with `force_idr = true`.
*/
bool
invalidate_ref_frames(uint64_t first_frame, uint64_t last_frame);
protected:
/**
* @brief Required. Used for loading NvEnc library and setting `nvenc` variable with `NvEncodeAPICreateInstance()`.
* Called during `create_encoder()` if `nvenc` variable is not initialized.
* @return `true` on success, `false` on error
*/
virtual bool
init_library() = 0;
/**
* @brief Required. Used for creating outside-facing input surface,
* registering this surface with `nvenc->nvEncRegisterResource()` and setting `registered_input_buffer` variable.
* Called during `create_encoder()`.
* @return `true` on success, `false` on error
*/
virtual bool
create_and_register_input_buffer() = 0;
/**
* @brief Optional. Override if you must perform additional operations on the registered input surface in the beginning of `encode_frame()`.
* Typically used for interop copy.
* @return `true` on success, `false` on error
*/
virtual bool
synchronize_input_buffer() { return true; }
/**
* @brief Optional. Override if you want to create encoder in async mode.
* In this case must also set `async_event_handle` variable.
* @param timeout_ms Wait timeout in milliseconds
* @return `true` on success, `false` on timeout or error
*/
virtual bool
wait_for_async_event(uint32_t timeout_ms) { return false; }
bool
nvenc_failed(NVENCSTATUS status);
/**
* @brief This function returns the corresponding struct version for the minimum API required by the codec.
* @details Reducing the struct versions maximizes driver compatibility by avoiding needless API breaks.
* @param version The raw structure version from `NVENCAPI_STRUCT_VERSION()`.
* @param v11_struct_version Optionally specifies the struct version to use with v11 SDK major versions.
* @param v12_struct_version Optionally specifies the struct version to use with v12 SDK major versions.
* @return A suitable struct version for the active codec.
*/
uint32_t
min_struct_version(uint32_t version, uint32_t v11_struct_version = 0, uint32_t v12_struct_version = 0);
const NV_ENC_DEVICE_TYPE device_type;
void *encoder = nullptr;
struct {
uint32_t width = 0;
uint32_t height = 0;
NV_ENC_BUFFER_FORMAT buffer_format = NV_ENC_BUFFER_FORMAT_UNDEFINED;
uint32_t ref_frames_in_dpb = 0;
bool rfi = false;
} encoder_params;
std::string last_nvenc_error_string;
// Derived classes set these variables
void *device = nullptr; ///< Platform-specific handle of encoding device.
///< Should be set in constructor or `init_library()`.
std::shared_ptr<NV_ENCODE_API_FUNCTION_LIST> nvenc; ///< Function pointers list produced by `NvEncodeAPICreateInstance()`.
///< Should be set in `init_library()`.
NV_ENC_REGISTERED_PTR registered_input_buffer = nullptr; ///< Platform-specific input surface registered with `NvEncRegisterResource()`.
///< Should be set in `create_and_register_input_buffer()`.
void *async_event_handle = nullptr; ///< (optional) Platform-specific handle of event object event.
///< Can be set in constructor or `init_library()`, must override `wait_for_async_event()`.
private:
NV_ENC_OUTPUT_PTR output_bitstream = nullptr;
uint32_t minimum_api_version = 0;
struct {
uint64_t last_encoded_frame_index = 0;
bool rfi_needs_confirmation = false;
std::pair<uint64_t, uint64_t> last_rfi_range;
logging::min_max_avg_periodic_logger<double> frame_size_logger = { debug, "NvEnc: encoded frame sizes in kB", "" };
} encoder_state;
};
} // namespace nvenc
| 6,466
|
C++
|
.h
| 137
| 39.627737
| 160
| 0.655787
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,066
|
nvenc_colorspace.h
|
LizardByte_Sunshine/src/nvenc/nvenc_colorspace.h
|
/**
* @file src/nvenc/nvenc_colorspace.h
* @brief Declarations for NVENC YUV colorspace.
*/
#pragma once
#include <ffnvcodec/nvEncodeAPI.h>
namespace nvenc {
/**
 * @brief YUV colorspace and color range.
 * @details Carries the NVENC VUI signaling parameters for the encoded stream.
 */
struct nvenc_colorspace_t {
  NV_ENC_VUI_COLOR_PRIMARIES primaries;  ///< Color primaries signaled in the VUI.
  // NOTE(review): "tranfer_function" is a typo of "transfer_function"; the name is
  // kept as-is because renaming the field would break existing callers.
  NV_ENC_VUI_TRANSFER_CHARACTERISTIC tranfer_function;  ///< Transfer characteristic signaled in the VUI.
  NV_ENC_VUI_MATRIX_COEFFS matrix;  ///< YUV matrix coefficients signaled in the VUI.
  bool full_range;  ///< Whether full color range is used.
};
} // namespace nvenc
| 456
|
C++
|
.h
| 17
| 22.529412
| 57
| 0.696056
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,067
|
nvenc_config.h
|
LizardByte_Sunshine/src/nvenc/nvenc_config.h
|
/**
* @file src/nvenc/nvenc_config.h
* @brief Declarations for NVENC encoder configuration.
*/
#pragma once
namespace nvenc {
/**
 * @brief Mode for the optional preliminary encoding pass (see nvenc_config::two_pass).
 */
enum class nvenc_two_pass {
  disabled,  ///< Single pass, the fastest and no extra vram
  quarter_resolution,  ///< Larger motion vectors being caught, faster and uses less extra vram
  full_resolution,  ///< Better overall statistics, slower and uses more extra vram
};
/**
 * @brief NVENC encoder configuration.
 * @details All members have sensible defaults; see per-field comments for semantics.
 */
struct nvenc_config {
  // Quality preset from 1 to 7, higher is slower
  int quality_preset = 1;

  // Use optional preliminary pass for better motion vectors, bitrate distribution and stricter VBV(HRD), uses CUDA cores
  nvenc_two_pass two_pass = nvenc_two_pass::quarter_resolution;

  // Percentage increase of VBV/HRD from the default single frame, allows low-latency variable bitrate
  int vbv_percentage_increase = 0;

  // Improves fades compression, uses CUDA cores
  bool weighted_prediction = false;

  // Allocate more bitrate to flat regions since they're visually more perceptible, uses CUDA cores
  bool adaptive_quantization = false;

  // Don't use QP below certain value, limits peak image quality to save bitrate
  bool enable_min_qp = false;

  // Min QP value for H.264 when enable_min_qp is selected
  unsigned min_qp_h264 = 19;

  // Min QP value for HEVC when enable_min_qp is selected
  unsigned min_qp_hevc = 23;

  // Min QP value for AV1 when enable_min_qp is selected
  unsigned min_qp_av1 = 23;

  // Use CAVLC entropy coding in H.264 instead of CABAC, not relevant and here for historical reasons
  bool h264_cavlc = false;

  // Add filler data to encoded frames to stay at target bitrate, mainly for testing
  bool insert_filler_data = false;
};
} // namespace nvenc
| 1,869
|
C++
|
.h
| 39
| 42.153846
| 124
| 0.709212
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,068
|
nvenc_d3d11_native.h
|
LizardByte_Sunshine/src/nvenc/nvenc_d3d11_native.h
|
/**
* @file src/nvenc/nvenc_d3d11_native.h
* @brief Declarations for native Direct3D11 NVENC encoder.
*/
#pragma once
#ifdef _WIN32
#include <comdef.h>
#include <d3d11.h>
#include "nvenc_d3d11.h"
namespace nvenc {
/**
 * @brief Native Direct3D11 NVENC encoder.
 */
class nvenc_d3d11_native final: public nvenc_d3d11 {
public:
  /**
   * @param d3d_device Direct3D11 device used for encoding.
   */
  explicit nvenc_d3d11_native(ID3D11Device *d3d_device);
  ~nvenc_d3d11_native();

  // Returns the texture callers fill with frame data before encoding.
  // NOTE(review): exact usage contract is declared on the nvenc_d3d11 base class
  // (not visible in this file).
  ID3D11Texture2D *
  get_input_texture() override;

private:
  // Creates d3d_input_texture and registers it with NVENC
  // (see nvenc_base::create_and_register_input_buffer()).
  bool
  create_and_register_input_buffer() override;

  const ID3D11DevicePtr d3d_device;  ///< COM smart pointer to the encoding device.
  ID3D11Texture2DPtr d3d_input_texture;  ///< Input surface registered with NVENC.
};
} // namespace nvenc
#endif
| 794
|
C++
|
.h
| 30
| 21.566667
| 62
| 0.680481
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,069
|
nvenc_encoded_frame.h
|
LizardByte_Sunshine/src/nvenc/nvenc_encoded_frame.h
|
/**
* @file src/nvenc/nvenc_encoded_frame.h
* @brief Declarations for NVENC encoded frame.
*/
#pragma once
#include <cstdint>
#include <vector>
namespace nvenc {
/**
 * @brief Encoded frame.
 */
struct nvenc_encoded_frame {
  std::vector<uint8_t> data;  ///< Encoded bitstream bytes.
  uint64_t frame_index = 0;  ///< Index of this frame in the encode sequence.
  bool idr = false;  ///< Whether this frame is an IDR (keyframe).
  bool after_ref_frame_invalidation = false;  ///< Set when the frame was encoded after a reference-frame invalidation.
};
} // namespace nvenc
| 418
|
C++
|
.h
| 18
| 19.055556
| 48
| 0.65051
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,070
|
common.h
|
LizardByte_Sunshine/src/platform/common.h
|
/**
* @file src/platform/common.h
* @brief Declarations for common platform specific utilities.
*/
#pragma once
#include <bitset>
#include <filesystem>
#include <functional>
#include <mutex>
#include <string>
#include <boost/core/noncopyable.hpp>
#ifndef _WIN32
#include <boost/asio.hpp>
#include <boost/process.hpp>
#endif
#include "src/config.h"
#include "src/logging.h"
#include "src/thread_safe.h"
#include "src/utility.h"
#include "src/video_colorspace.h"
extern "C" {
#include <moonlight-common-c/src/Limelight.h>
}
using namespace std::literals;
struct sockaddr;
struct AVFrame;
struct AVBufferRef;
struct AVHWFramesContext;
struct AVCodecContext;
struct AVDictionary;
#ifdef _WIN32
// Forward declarations of boost classes to avoid having to include boost headers
// here, which results in issues with Windows.h and WinSock2.h include order.
namespace boost {
namespace asio {
namespace ip {
class address;
} // namespace ip
} // namespace asio
namespace filesystem {
class path;
}
namespace process::inline v1 {
class child;
class group;
template <typename Char>
class basic_environment;
typedef basic_environment<char> environment;
} // namespace process::inline v1
} // namespace boost
#endif
namespace video {
struct config_t;
} // namespace video
namespace nvenc {
class nvenc_base;
}
namespace platf {
// Limited by bits in activeGamepadMask
constexpr auto MAX_GAMEPADS = 16;
constexpr std::uint32_t DPAD_UP = 0x0001;
constexpr std::uint32_t DPAD_DOWN = 0x0002;
constexpr std::uint32_t DPAD_LEFT = 0x0004;
constexpr std::uint32_t DPAD_RIGHT = 0x0008;
constexpr std::uint32_t START = 0x0010;
constexpr std::uint32_t BACK = 0x0020;
constexpr std::uint32_t LEFT_STICK = 0x0040;
constexpr std::uint32_t RIGHT_STICK = 0x0080;
constexpr std::uint32_t LEFT_BUTTON = 0x0100;
constexpr std::uint32_t RIGHT_BUTTON = 0x0200;
constexpr std::uint32_t HOME = 0x0400;
constexpr std::uint32_t A = 0x1000;
constexpr std::uint32_t B = 0x2000;
constexpr std::uint32_t X = 0x4000;
constexpr std::uint32_t Y = 0x8000;
constexpr std::uint32_t PADDLE1 = 0x010000;
constexpr std::uint32_t PADDLE2 = 0x020000;
constexpr std::uint32_t PADDLE3 = 0x040000;
constexpr std::uint32_t PADDLE4 = 0x080000;
constexpr std::uint32_t TOUCHPAD_BUTTON = 0x100000;
constexpr std::uint32_t MISC_BUTTON = 0x200000;
// One selectable gamepad backend option and whether it is currently usable.
struct supported_gamepad_t {
  std::string name;  // Option name
  bool is_enabled;  // Whether this option can be used right now
  std::string reason_disabled;  // Human-readable reason when is_enabled is false
};
/**
 * @brief Kinds of feedback messages that can be sent back to a client's gamepad.
 */
enum class gamepad_feedback_e {
  rumble,  ///< Rumble
  rumble_triggers,  ///< Rumble triggers
  set_motion_event_state,  ///< Set motion event state
  set_rgb_led,  ///< Set RGB LED
};

/**
 * @brief A single gamepad feedback message: a type tag plus a payload union.
 */
struct gamepad_feedback_msg_t {
  /// Build a body-rumble message for the given gamepad id.
  static gamepad_feedback_msg_t
  make_rumble(std::uint16_t id, std::uint16_t lowfreq, std::uint16_t highfreq) {
    gamepad_feedback_msg_t out;
    out.id = id;
    out.type = gamepad_feedback_e::rumble;
    out.data.rumble.lowfreq = lowfreq;
    out.data.rumble.highfreq = highfreq;
    return out;
  }

  /// Build a trigger-rumble message for the given gamepad id.
  static gamepad_feedback_msg_t
  make_rumble_triggers(std::uint16_t id, std::uint16_t left, std::uint16_t right) {
    gamepad_feedback_msg_t out;
    out.id = id;
    out.type = gamepad_feedback_e::rumble_triggers;
    out.data.rumble_triggers.left_trigger = left;
    out.data.rumble_triggers.right_trigger = right;
    return out;
  }

  /// Build a message configuring motion-event reporting of the given type and rate.
  static gamepad_feedback_msg_t
  make_motion_event_state(std::uint16_t id, std::uint8_t motion_type, std::uint16_t report_rate) {
    gamepad_feedback_msg_t out;
    out.id = id;
    out.type = gamepad_feedback_e::set_motion_event_state;
    out.data.motion_event_state.motion_type = motion_type;
    out.data.motion_event_state.report_rate = report_rate;
    return out;
  }

  /// Build a message setting the controller's RGB LED color.
  static gamepad_feedback_msg_t
  make_rgb_led(std::uint16_t id, std::uint8_t r, std::uint8_t g, std::uint8_t b) {
    gamepad_feedback_msg_t out;
    out.id = id;
    out.type = gamepad_feedback_e::set_rgb_led;
    out.data.rgb_led.r = r;
    out.data.rgb_led.g = g;
    out.data.rgb_led.b = b;
    return out;
  }

  gamepad_feedback_e type;  ///< Discriminator selecting the active union member.
  std::uint16_t id;  ///< Target gamepad id.

  union {
    struct {
      std::uint16_t lowfreq;
      std::uint16_t highfreq;
    } rumble;
    struct {
      std::uint16_t left_trigger;
      std::uint16_t right_trigger;
    } rumble_triggers;
    struct {
      std::uint16_t report_rate;
      std::uint8_t motion_type;
    } motion_event_state;
    struct {
      std::uint8_t r;
      std::uint8_t g;
      std::uint8_t b;
    } rgb_led;
  } data;  ///< Payload; the valid member is determined by `type`.
};
using feedback_queue_t = safe::mail_raw_t::queue_t<gamepad_feedback_msg_t>;
namespace speaker {
enum speaker_e {
FRONT_LEFT, ///< Front left
FRONT_RIGHT, ///< Front right
FRONT_CENTER, ///< Front center
LOW_FREQUENCY, ///< Low frequency
BACK_LEFT, ///< Back left
BACK_RIGHT, ///< Back right
SIDE_LEFT, ///< Side left
SIDE_RIGHT, ///< Side right
MAX_SPEAKERS, ///< Maximum number of speakers
};
constexpr std::uint8_t map_stereo[] {
FRONT_LEFT, FRONT_RIGHT
};
constexpr std::uint8_t map_surround51[] {
FRONT_LEFT,
FRONT_RIGHT,
FRONT_CENTER,
LOW_FREQUENCY,
BACK_LEFT,
BACK_RIGHT,
};
constexpr std::uint8_t map_surround71[] {
FRONT_LEFT,
FRONT_RIGHT,
FRONT_CENTER,
LOW_FREQUENCY,
BACK_LEFT,
BACK_RIGHT,
SIDE_LEFT,
SIDE_RIGHT,
};
} // namespace speaker
enum class mem_type_e {
system, ///< System memory
vaapi, ///< VAAPI
dxgi, ///< DXGI
cuda, ///< CUDA
videotoolbox, ///< VideoToolbox
unknown ///< Unknown
};
/**
 * @brief Pixel formats used for captured/encoded frames.
 */
enum class pix_fmt_e {
  yuv420p,  ///< YUV 4:2:0
  yuv420p10,  ///< YUV 4:2:0 10-bit
  nv12,  ///< NV12
  p010,  ///< P010
  ayuv,  ///< AYUV
  yuv444p16,  ///< Planar 10-bit (shifted to 16-bit) YUV 4:4:4
  y410,  ///< Y410
  unknown  ///< Unknown
};

/**
 * @brief Convert a pix_fmt_e value to its human-readable name.
 * @param pix_fmt The pixel format.
 * @return The enumerator's name, or "unknown" for values outside the enum.
 */
inline std::string_view
from_pix_fmt(pix_fmt_e pix_fmt) {
  using namespace std::literals;
  // Explicit cases replace the previous `_CONVERT` helper macro: that name begins
  // with an underscore followed by an uppercase letter, which is reserved for the
  // implementation in C++ ([lex.name]).
  switch (pix_fmt) {
    case pix_fmt_e::yuv420p:
      return "yuv420p"sv;
    case pix_fmt_e::yuv420p10:
      return "yuv420p10"sv;
    case pix_fmt_e::nv12:
      return "nv12"sv;
    case pix_fmt_e::p010:
      return "p010"sv;
    case pix_fmt_e::ayuv:
      return "ayuv"sv;
    case pix_fmt_e::yuv444p16:
      return "yuv444p16"sv;
    case pix_fmt_e::y410:
      return "y410"sv;
    case pix_fmt_e::unknown:
      return "unknown"sv;
  }
  // Defensive fallback for out-of-range values cast to pix_fmt_e.
  return "unknown"sv;
}
// Dimensions for touchscreen input
struct touch_port_t {
  int offset_x, offset_y;  // Top-left corner of the viewport
  int width, height;  // Size of the viewport
};

// These values must match Limelight-internal.h's SS_FF_* constants!
namespace platform_caps {
  typedef uint32_t caps_t;

  constexpr caps_t pen_touch = 0x01;  // Pen and touch events
  constexpr caps_t controller_touch = 0x02;  // Controller touch events
};  // namespace platform_caps

// Snapshot of one gamepad's buttons, triggers and sticks.
struct gamepad_state_t {
  std::uint32_t buttonFlags;  // Button bitmask — presumably the DPAD_*/START/... constants above; confirm in input handling
  std::uint8_t lt;  // Left trigger
  std::uint8_t rt;  // Right trigger
  std::int16_t lsX;  // Left stick X
  std::int16_t lsY;  // Left stick Y
  std::int16_t rsX;  // Right stick X
  std::int16_t rsY;  // Right stick Y
};

struct gamepad_id_t {
  // The global index is used when looking up gamepads in the platform's
  // gamepad array. It identifies gamepads uniquely among all clients.
  int globalIndex;

  // The client-relative index is the controller number as reported by the
  // client. It must be used when communicating back to the client via
  // the input feedback queue.
  std::uint8_t clientRelativeIndex;
};

// Metadata reported by the client when a gamepad is connected.
struct gamepad_arrival_t {
  std::uint8_t type;  // Controller type as reported by the client
  std::uint16_t capabilities;  // Capability bitmask from the client
  std::uint32_t supportedButtons;  // Buttons the client says the pad supports
};

// A touch event on a gamepad's touchpad.
struct gamepad_touch_t {
  gamepad_id_t id;
  std::uint8_t eventType;
  std::uint32_t pointerId;
  float x;
  float y;
  float pressure;
};

// A motion (accelerometer/gyro) sample from a gamepad.
struct gamepad_motion_t {
  gamepad_id_t id;
  std::uint8_t motionType;

  // Accel: m/s^2
  // Gyro: deg/s
  float x;
  float y;
  float z;
};

// A battery status report from a gamepad.
struct gamepad_battery_t {
  gamepad_id_t id;
  std::uint8_t state;  // Charging state
  std::uint8_t percentage;  // Battery level, 0-100 — presumably; confirm against protocol
};

// A touchscreen event from the client.
struct touch_input_t {
  std::uint8_t eventType;
  std::uint16_t rotation;  // Degrees (0..360) or LI_ROT_UNKNOWN
  std::uint32_t pointerId;
  float x;
  float y;
  float pressureOrDistance;  // Distance for hover and pressure for contact
  float contactAreaMajor;
  float contactAreaMinor;
};

// A pen/stylus event from the client.
struct pen_input_t {
  std::uint8_t eventType;
  std::uint8_t toolType;
  std::uint8_t penButtons;
  std::uint8_t tilt;  // Degrees (0..90) or LI_TILT_UNKNOWN
  std::uint16_t rotation;  // Degrees (0..360) or LI_ROT_UNKNOWN
  float x;
  float y;
  float pressureOrDistance;  // Distance for hover and pressure for contact
  float contactAreaMajor;
  float contactAreaMinor;
};
// Opaque RAII handle: derived types perform their cleanup in the destructor.
class deinit_t {
public:
  virtual ~deinit_t() = default;
};
// A captured frame image. Managed via shared_ptr (enable_shared_from_this);
// instances are neither copyable nor movable.
struct img_t: std::enable_shared_from_this<img_t> {
public:
  img_t() = default;

  // Non-copyable and non-movable: the image buffer is shared by pointer only.
  img_t(img_t &&) = delete;
  img_t(const img_t &) = delete;
  img_t &
  operator=(img_t &&) = delete;
  img_t &
  operator=(const img_t &) = delete;

  std::uint8_t *data {};  // Pixel data; ownership is defined by the derived backend type
  std::int32_t width {};  // Width in pixels
  std::int32_t height {};  // Height in pixels
  std::int32_t pixel_pitch {};  // Bytes per pixel — presumably; confirm against the capture backend
  std::int32_t row_pitch {};  // Bytes per image row
  std::optional<std::chrono::steady_clock::time_point> frame_timestamp;  // Capture time, if known

  virtual ~img_t() = default;
};
// Names of the audio sinks used for playback routing.
struct sink_t {
  // Play on host PC
  std::string host;

  // On macOS and Windows, it is not possible to create a virtual sink
  // Therefore, it is optional
  struct null_t {
    std::string stereo;
    std::string surround51;
    std::string surround71;
  };

  std::optional<null_t> null;  // Virtual ("null") sink names per speaker layout, when supported
};
// Base interface for encoder-facing devices that receive captured images.
struct encode_device_t {
  virtual ~encode_device_t() = default;

  // Convert/copy the captured image into the device's encoder input.
  virtual int
  convert(platf::img_t &img) = 0;

  video::sunshine_colorspace_t colorspace;  // Colorspace used for conversion/encoding
};
// FFmpeg (avcodec) flavor of encode_device_t. Frames are handed to the encoder
// via set_frame(); convert() is intentionally unimplemented here (always -1).
struct avcodec_encode_device_t: encode_device_t {
  void *data {};  // Backend-specific device context — opaque; see derived types
  AVFrame *frame {};  // Frame currently associated with this device

  // Not used by the avcodec path; always returns -1.
  int
  convert(platf::img_t &img) override {
    return -1;
  }

  // Apply this->colorspace to the underlying device/frame. Default is a no-op.
  virtual void
  apply_colorspace() {
  }

  /**
   * @brief Set the frame to be encoded.
   * @note Implementations must take ownership of 'frame'.
   */
  virtual int
  set_frame(AVFrame *frame, AVBufferRef *hw_frames_ctx) {
    BOOST_LOG(error) << "Illegal call to hwdevice_t::set_frame(). Did you forget to override it?";
    return -1;
  };

  /**
   * @brief Initialize the hwframes context.
   * @note Implementations may set parameters during initialization of the hwframes context.
   */
  virtual void
  init_hwframes(AVHWFramesContext *frames) {};

  /**
   * @brief Provides a hook for allow platform-specific code to adjust codec options.
   * @note Implementations may set or modify codec options prior to codec initialization.
   */
  virtual void
  init_codec_options(AVCodecContext *ctx, AVDictionary **options) {};

  /**
   * @brief Prepare to derive a context.
   * @note Implementations may make modifications required before context derivation
   */
  virtual int
  prepare_to_derive_context(int hw_device_type) {
    return 0;
  };
};
// NVENC flavor of encode_device_t; wraps an nvenc_base encoder instance.
struct nvenc_encode_device_t: encode_device_t {
  // Initialize the wrapped NVENC encoder for the given stream config and colorspace.
  virtual bool
  init_encoder(const video::config_t &client_config, const video::sunshine_colorspace_t &colorspace) = 0;

  nvenc::nvenc_base *nvenc = nullptr;  // Wrapped encoder — presumably non-owning, lifetime managed by the derived type; confirm
};
enum class capture_e : int {
ok, ///< Success
reinit, ///< Need to reinitialize
timeout, ///< Timeout
interrupted, ///< Capture was interrupted
error ///< Error
};
// Abstract capture source for one display/monitor. Concrete backends implement
// capture() and image allocation; encoder-device factories default to "unsupported".
class display_t {
public:
  /**
   * @brief Callback for when a new image is ready.
   * When display has a new image ready or a timeout occurs, this callback will be called with the image.
   * If a frame was captured, frame_captured will be true. If a timeout occurred, it will be false.
   * @retval true On success
   * @retval false On break request
   */
  using push_captured_image_cb_t = std::function<bool(std::shared_ptr<img_t> &&img, bool frame_captured)>;

  /**
   * @brief Get free image from pool.
   * Calls must be synchronized.
   * Blocks until there is free image in the pool or capture is interrupted.
   * @retval true On success, img_out contains free image
   * @retval false When capture has been interrupted, img_out contains nullptr
   */
  using pull_free_image_cb_t = std::function<bool(std::shared_ptr<img_t> &img_out)>;

  display_t() noexcept:
      offset_x { 0 }, offset_y { 0 } {}

  /**
   * @brief Capture a frame.
   * @param push_captured_image_cb The callback that is called with captured image,
   *                               must be called from the same thread as capture()
   * @param pull_free_image_cb Capture backends call this callback to get empty image from the pool.
   *                           If backend uses multiple threads, calls to this callback must be synchronized.
   *                           Calls to this callback and push_captured_image_cb must be synchronized as well.
   * @param cursor A pointer to the flag that indicates whether the cursor should be captured as well.
   * @retval capture_e::ok When stopping
   * @retval capture_e::error On error
   * @retval capture_e::reinit When need of reinitialization
   */
  virtual capture_e
  capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) = 0;

  // Allocate a new image object compatible with this display's captures.
  virtual std::shared_ptr<img_t>
  alloc_img() = 0;

  // Fill img with placeholder content. Return value is an error code — presumably 0 on success; confirm per backend.
  virtual int
  dummy_img(img_t *img) = 0;

  // Factory for an avcodec encode device; default: not supported (nullptr).
  virtual std::unique_ptr<avcodec_encode_device_t>
  make_avcodec_encode_device(pix_fmt_e pix_fmt) {
    return nullptr;
  }

  // Factory for an NVENC encode device; default: not supported (nullptr).
  virtual std::unique_ptr<nvenc_encode_device_t>
  make_nvenc_encode_device(pix_fmt_e pix_fmt) {
    return nullptr;
  }

  // Whether the display is currently in HDR mode; default: no.
  virtual bool
  is_hdr() {
    return false;
  }

  // Retrieve HDR metadata; default zeroes the struct and reports unsupported.
  virtual bool
  get_hdr_metadata(SS_HDR_METADATA &metadata) {
    std::memset(&metadata, 0, sizeof(metadata));
    return false;
  }

  /**
   * @brief Check that a given codec is supported by the display device.
   * @param name The FFmpeg codec name (or similar for non-FFmpeg codecs).
   * @param config The codec configuration.
   * @return `true` if supported, `false` otherwise.
   */
  virtual bool
  is_codec_supported(std::string_view name, const ::video::config_t &config) {
    return true;
  }

  virtual ~display_t() = default;

  // Offsets for when streaming a specific monitor. By default, they are 0.
  int offset_x, offset_y;
  int env_width, env_height;  // Dimensions of the whole desktop environment — presumably; confirm against backends
  int width, height;  // Dimensions of this display

protected:
  // collect capture timing data (at loglevel debug)
  logging::time_delta_periodic_logger sleep_overshoot_logger = { debug, "Frame capture sleep overshoot" };
};
// Audio capture stream; sample() fills a buffer of float samples.
class mic_t {
public:
  // Pull one frame of audio samples into frame_buffer.
  virtual capture_e
  sample(std::vector<float> &frame_buffer) = 0;

  virtual ~mic_t() = default;
};

// Platform audio control: select the output sink and open capture streams.
class audio_control_t {
public:
  // Select the audio sink. Return value is an error code — presumably 0 on success; confirm per platform.
  virtual int
  set_sink(const std::string &sink) = 0;

  // Open a capture stream with the given channel mapping, count, sample rate and frame size.
  virtual std::unique_ptr<mic_t>
  microphone(const std::uint8_t *mapping, int channels, std::uint32_t sample_rate, std::uint32_t frame_size) = 0;

  // Describe the current sink configuration, if available.
  virtual std::optional<sink_t>
  sink_info() = 0;

  virtual ~audio_control_t() = default;
};
void
freeInput(void *);
using input_t = util::safe_ptr<void, freeInput>;
std::filesystem::path
appdata();
std::string
get_mac_address(const std::string_view &address);
std::string
from_sockaddr(const sockaddr *const);
std::pair<std::uint16_t, std::string>
from_sockaddr_ex(const sockaddr *const);
std::unique_ptr<audio_control_t>
audio_control();
/**
* @brief Get the display_t instance for the given hwdevice_type.
* If display_name is empty, use the first monitor that's compatible you can find
* If you require to use this parameter in a separate thread, make a copy of it.
* @param display_name The name of the monitor that SHOULD be displayed
* @param config Stream configuration
* @return The display_t instance based on hwdevice_type.
*/
std::shared_ptr<display_t>
display(mem_type_e hwdevice_type, const std::string &display_name, const video::config_t &config);
// A list of names of displays accepted as display_name with the mem_type_e
std::vector<std::string>
display_names(mem_type_e hwdevice_type);
/**
* @brief Check if GPUs/drivers have changed since the last call to this function.
* @return `true` if a change has occurred or if it is unknown whether a change occurred.
*/
bool
needs_encoder_reenumeration();
boost::process::v1::child
run_command(bool elevated, bool interactive, const std::string &cmd, boost::filesystem::path &working_dir, const boost::process::v1::environment &env, FILE *file, std::error_code &ec, boost::process::v1::group *group);
enum class thread_priority_e : int {
low, ///< Low priority
normal, ///< Normal priority
high, ///< High priority
critical ///< Critical priority
};
void
adjust_thread_priority(thread_priority_e priority);
// Allow OS-specific actions to be taken to prepare for streaming
void
streaming_will_start();
void
streaming_will_stop();
void
restart();
/**
* @brief Set an environment variable.
* @param name The name of the environment variable.
* @param value The value to set the environment variable to.
* @return 0 on success, non-zero on failure.
*/
int
set_env(const std::string &name, const std::string &value);
/**
* @brief Unset an environment variable.
* @param name The name of the environment variable.
* @return 0 on success, non-zero on failure.
*/
int
unset_env(const std::string &name);
/**
 * @brief Non-owning view of a contiguous byte region (pointer + length).
 */
struct buffer_descriptor_t {
  const char *buffer = nullptr;  ///< Start of the region (not owned).
  size_t size = 0;  ///< Length of the region in bytes.

  // An explicit two-argument constructor is kept so emplace_back() works prior to C++20.
  buffer_descriptor_t(const char *buffer, size_t size):
      buffer(buffer), size(size) {}

  buffer_descriptor_t() = default;
};
// Parameters for sending many equally-sized header+payload messages in one
// batched operation; consumed by send_batch().
struct batched_send_info_t {
  // Optional headers to be prepended to each packet
  const char *headers;
  size_t header_size;

  // One or more data buffers to use for the payloads
  //
  // NB: Data buffers must be aligned to payload size!
  std::vector<buffer_descriptor_t> &payload_buffers;
  size_t payload_size;

  // The offset (in header+payload message blocks) in the header and payload
  // buffers to begin sending messages from
  size_t block_offset;

  // The number of header+payload message blocks to send
  size_t block_count;

  std::uintptr_t native_socket;  // OS socket handle to send on
  boost::asio::ip::address &target_address;  // Destination address
  uint16_t target_port;  // Destination port
  boost::asio::ip::address &source_address;  // Local address to send from

  /**
   * @brief Returns a payload buffer descriptor for the given payload offset.
   * @param offset The offset in the total payload data (bytes).
   * @return Buffer descriptor describing the region at the given offset.
   */
  buffer_descriptor_t
  buffer_for_payload_offset(ptrdiff_t offset) {
    // NOTE(review): offset is assumed non-negative; the signed/unsigned
    // comparison below would misbehave for negative values.
    for (const auto &desc : payload_buffers) {
      if (offset < desc.size) {
        return {
          desc.buffer + offset,
          desc.size - offset,
        };
      }
      else {
        // Offset lies past this buffer; keep walking.
        offset -= desc.size;
      }
    }
    // Offset is beyond all buffers: empty descriptor.
    return {};
  }
};
bool
send_batch(batched_send_info_t &send_info);
// Parameters for a single (non-batched) send operation; consumed by send().
struct send_info_t {
  const char *header;  // Optional header bytes
  size_t header_size;
  const char *payload;  // Payload bytes
  size_t payload_size;

  std::uintptr_t native_socket;  // OS socket handle to send on
  boost::asio::ip::address &target_address;  // Destination address
  uint16_t target_port;  // Destination port
  boost::asio::ip::address &source_address;  // Local address to send from
};
bool
send(send_info_t &send_info);
enum class qos_data_type_e : int {
audio, ///< Audio
video ///< Video
};
/**
* @brief Enable QoS on the given socket for traffic to the specified destination.
* @param native_socket The native socket handle.
* @param address The destination address for traffic sent on this socket.
* @param port The destination port for traffic sent on this socket.
* @param data_type The type of traffic sent on this socket.
* @param dscp_tagging Specifies whether to enable DSCP tagging on outgoing traffic.
*/
std::unique_ptr<deinit_t>
enable_socket_qos(uintptr_t native_socket, boost::asio::ip::address &address, uint16_t port, qos_data_type_e data_type, bool dscp_tagging);
/**
* @brief Open a url in the default web browser.
* @param url The url to open.
*/
void
open_url(const std::string &url);
/**
* @brief Attempt to gracefully terminate a process group.
* @param native_handle The native handle of the process group.
* @return `true` if termination was successfully requested.
*/
bool
request_process_group_exit(std::uintptr_t native_handle);
/**
* @brief Check if a process group still has running children.
* @param native_handle The native handle of the process group.
* @return `true` if processes are still running.
*/
bool
process_group_running(std::uintptr_t native_handle);
input_t
input();
/**
* @brief Get the current mouse position on screen
* @param input The input_t instance to use.
* @return Screen coordinates of the mouse.
* @examples
* auto [x, y] = get_mouse_loc(input);
* @examples_end
*/
util::point_t
get_mouse_loc(input_t &input);
void
move_mouse(input_t &input, int deltaX, int deltaY);
void
abs_mouse(input_t &input, const touch_port_t &touch_port, float x, float y);
void
button_mouse(input_t &input, int button, bool release);
void
scroll(input_t &input, int distance);
void
hscroll(input_t &input, int distance);
void
keyboard_update(input_t &input, uint16_t modcode, bool release, uint8_t flags);
void
gamepad_update(input_t &input, int nr, const gamepad_state_t &gamepad_state);
void
unicode(input_t &input, char *utf8, int size);
typedef deinit_t client_input_t;
/**
* @brief Allocate a context to store per-client input data.
* @param input The global input context.
* @return A unique pointer to a per-client input data context.
*/
std::unique_ptr<client_input_t>
allocate_client_input_context(input_t &input);
/**
* @brief Send a touch event to the OS.
* @param input The client-specific input context.
* @param touch_port The current viewport for translating to screen coordinates.
* @param touch The touch event.
*/
void
touch_update(client_input_t *input, const touch_port_t &touch_port, const touch_input_t &touch);
/**
* @brief Send a pen event to the OS.
* @param input The client-specific input context.
* @param touch_port The current viewport for translating to screen coordinates.
* @param pen The pen event.
*/
void
pen_update(client_input_t *input, const touch_port_t &touch_port, const pen_input_t &pen);
/**
* @brief Send a gamepad touch event to the OS.
* @param input The global input context.
* @param touch The touch event.
*/
void
gamepad_touch(input_t &input, const gamepad_touch_t &touch);
/**
* @brief Send a gamepad motion event to the OS.
* @param input The global input context.
* @param motion The motion event.
*/
void
gamepad_motion(input_t &input, const gamepad_motion_t &motion);
/**
* @brief Send a gamepad battery event to the OS.
* @param input The global input context.
* @param battery The battery event.
*/
void
gamepad_battery(input_t &input, const gamepad_battery_t &battery);
/**
* @brief Create a new virtual gamepad.
* @param input The global input context.
* @param id The gamepad ID.
* @param metadata Controller metadata from client (empty if none provided).
* @param feedback_queue The queue for posting messages back to the client.
* @return 0 on success.
*/
int
alloc_gamepad(input_t &input, const gamepad_id_t &id, const gamepad_arrival_t &metadata, feedback_queue_t feedback_queue);
void
free_gamepad(input_t &input, int nr);
/**
* @brief Get the supported platform capabilities to advertise to the client.
* @return Capability flags.
*/
platform_caps::caps_t
get_capabilities();
#define SERVICE_NAME "Sunshine"
#define SERVICE_TYPE "_nvstream._tcp"
namespace publish {
[[nodiscard]] std::unique_ptr<deinit_t>
start();
}
[[nodiscard]] std::unique_ptr<deinit_t>
init();
/**
* @brief Returns the current computer name in UTF-8.
* @return Computer name or a placeholder upon failure.
*/
std::string
get_host_name();
/**
* @brief Gets the supported gamepads for this platform backend.
* @details This may be called prior to `platf::input()`!
* @param input Pointer to the platform's `input_t` or `nullptr`.
* @return Vector of gamepad options and status.
*/
std::vector<supported_gamepad_t> &
supported_gamepads(input_t *input);
// Platform-backed timer for high-precision sleeps; obtain via create_high_precision_timer().
struct high_precision_timer: private boost::noncopyable {
  virtual ~high_precision_timer() = default;

  /**
   * @brief Sleep for the duration
   * @param duration Sleep duration
   */
  virtual void
  sleep_for(const std::chrono::nanoseconds &duration) = 0;

  /**
   * @brief Check if platform-specific timer backend has been initialized successfully
   * @return `true` on success, `false` on error
   */
  virtual
  operator bool() = 0;
};
/**
* @brief Create platform-specific timer capable of high-precision sleep
* @return A unique pointer to timer
*/
std::unique_ptr<high_precision_timer>
create_high_precision_timer();
} // namespace platf
| 25,457
|
C++
|
.h
| 774
| 28.257106
| 220
| 0.673753
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,071
|
wayland.h
|
LizardByte_Sunshine/src/platform/linux/wayland.h
|
/**
* @file src/platform/linux/wayland.h
* @brief Declarations for Wayland capture.
*/
#pragma once
#include <bitset>
#ifdef SUNSHINE_BUILD_WAYLAND
#include <wlr-export-dmabuf-unstable-v1.h>
#include <xdg-output-unstable-v1.h>
#endif
#include "graphics.h"
/**
* The classes defined in this macro block should only be used by
* cpp files whose compilation depends on SUNSHINE_BUILD_WAYLAND
*/
#ifdef SUNSHINE_BUILD_WAYLAND
namespace wl {
using display_internal_t = util::safe_ptr<wl_display, wl_display_disconnect>;
// One exported frame and its EGL surface descriptor.
class frame_t {
public:
  frame_t();

  egl::surface_descriptor_t sd;  // Surface descriptor for the captured frame

  // Release the resources referenced by sd — presumably closes the dmabuf fds; confirm in the implementation.
  void
  destroy();
};
// Receives frames via the wlr-export-dmabuf protocol using two frame slots:
// one exposed as current_frame while the other is being filled.
class dmabuf_t {
public:
  enum status_e {
    WAITING,  ///< Waiting for a frame
    READY,  ///< Frame is ready
    REINIT,  ///< Reinitialize the frame
  };

  dmabuf_t(dmabuf_t &&) = delete;
  dmabuf_t(const dmabuf_t &) = delete;
  dmabuf_t &
  operator=(const dmabuf_t &) = delete;
  dmabuf_t &
  operator=(dmabuf_t &&) = delete;

  dmabuf_t();

  // Start capturing frames exported for the given output.
  void
  listen(zwlr_export_dmabuf_manager_v1 *dmabuf_manager, wl_output *output, bool blend_cursor = false);

  ~dmabuf_t();

  // The four methods below match the zwlr_export_dmabuf_frame_v1 events
  // (frame announcement, per-plane object, completion, cancellation) —
  // presumably wired up through the `listener` member; confirm in the implementation.
  void
  frame(
    zwlr_export_dmabuf_frame_v1 *frame,
    std::uint32_t width, std::uint32_t height,
    std::uint32_t x, std::uint32_t y,
    std::uint32_t buffer_flags, std::uint32_t flags,
    std::uint32_t format,
    std::uint32_t high, std::uint32_t low,
    std::uint32_t obj_count);

  void
  object(
    zwlr_export_dmabuf_frame_v1 *frame,
    std::uint32_t index,
    std::int32_t fd,
    std::uint32_t size,
    std::uint32_t offset,
    std::uint32_t stride,
    std::uint32_t plane_index);

  void
  ready(
    zwlr_export_dmabuf_frame_v1 *frame,
    std::uint32_t tv_sec_hi, std::uint32_t tv_sec_lo, std::uint32_t tv_nsec);

  void
  cancel(
    zwlr_export_dmabuf_frame_v1 *frame,
    std::uint32_t reason);

  // Returns the frame slot that is NOT currently current_frame.
  inline frame_t *
  get_next_frame() {
    return current_frame == &frames[0] ? &frames[1] : &frames[0];
  }

  status_e status;  // Current capture state

  std::array<frame_t, 2> frames;  // The two frame slots
  frame_t *current_frame;  // Slot currently exposed to the consumer

  zwlr_export_dmabuf_frame_v1_listener listener;  // Protocol listener for the events above
};
// One Wayland output (monitor) and the metadata collected from the
// wl_output and xdg-output listeners.
class monitor_t {
public:
  monitor_t(monitor_t &&) = delete;
  monitor_t(const monitor_t &) = delete;
  monitor_t &
  operator=(const monitor_t &) = delete;
  monitor_t &
  operator=(monitor_t &&) = delete;

  monitor_t(wl_output *output);

  // zxdg_output_v1 listener callbacks (see xdg_listener)
  void
  xdg_name(zxdg_output_v1 *, const char *name);
  void
  xdg_description(zxdg_output_v1 *, const char *description);
  void
  xdg_position(zxdg_output_v1 *, std::int32_t x, std::int32_t y);
  void
  xdg_size(zxdg_output_v1 *, std::int32_t width, std::int32_t height);
  void
  xdg_done(zxdg_output_v1 *) {}

  // wl_output listener callbacks (see wl_listener); events we don't need are no-ops
  void
  wl_geometry(wl_output *wl_output, std::int32_t x, std::int32_t y,
    std::int32_t physical_width, std::int32_t physical_height, std::int32_t subpixel,
    const char *make, const char *model, std::int32_t transform) {}
  void
  wl_mode(wl_output *wl_output, std::uint32_t flags,
    std::int32_t width, std::int32_t height, std::int32_t refresh);
  void
  wl_done(wl_output *wl_output) {}
  void
  wl_scale(wl_output *wl_output, std::int32_t factor) {}

  // Register both listeners for this output.
  void
  listen(zxdg_output_manager_v1 *output_manager);

  wl_output *output;  // Underlying Wayland output object

  std::string name;  // Reported via xdg_name
  std::string description;  // Reported via xdg_description

  platf::touch_port_t viewport;  // Monitor position/size — presumably filled from xdg_position/xdg_size; confirm in the implementation

  wl_output_listener wl_listener;
  zxdg_output_v1_listener xdg_listener;
};
// Tracks which Wayland globals (interfaces) the compositor advertised on the
// registry and holds the bound manager objects we need.
class interface_t {
  // Registry id/version pair for a bound global.
  struct bind_t {
    std::uint32_t id;
    std::uint32_t version;
  };

public:
  enum interface_e {
    XDG_OUTPUT,  ///< xdg-output
    WLR_EXPORT_DMABUF,  ///< Export dmabuf
    MAX_INTERFACES,  ///< Maximum number of interfaces
  };

  interface_t(interface_t &&) = delete;
  interface_t(const interface_t &) = delete;
  interface_t &
  operator=(const interface_t &) = delete;
  interface_t &
  operator=(interface_t &&) = delete;

  interface_t() noexcept;

  // Start listening for global add/remove events on the registry.
  void
  listen(wl_registry *registry);

  std::vector<std::unique_ptr<monitor_t>> monitors;  // One entry per advertised output

  zwlr_export_dmabuf_manager_v1 *dmabuf_manager;
  zxdg_output_manager_v1 *output_manager;

  // Query whether the given interface was advertised/bound.
  bool
  operator[](interface_e bit) const {
    return interface[bit];
  }

private:
  // wl_registry listener callbacks (see `listener`)
  void
  add_interface(wl_registry *registry, std::uint32_t id, const char *interface, std::uint32_t version);
  void
  del_interface(wl_registry *registry, uint32_t id);

  std::bitset<MAX_INTERFACES> interface;  // One bit per interface_e entry
  wl_registry_listener listener;
};
// Owns the client connection to the Wayland compositor.
class display_t {
public:
/**
 * @brief Initialize the Wayland display connection.
 * If display_name == nullptr -> display_name = std::getenv("WAYLAND_DISPLAY")
 * @param display_name The name of the display.
 * @return 0 on success, -1 on failure.
 */
int
init(const char *display_name = nullptr);
// Roundtrip with Wayland connection: blocks until the compositor has
// processed all previously issued requests.
void
roundtrip();
// Wait up to the timeout to read and dispatch new events.
// NOTE(review): return value presumably indicates whether events were
// dispatched before the timeout -- confirm against the definition.
bool
dispatch(std::chrono::milliseconds timeout);
// Get the registry associated with the display
// No need to manually free the registry
wl_registry *
registry();
// Raw pointer to the underlying wl_display; ownership remains with this object.
inline display_internal_t::pointer
get() {
return display_internal.get();
}
private:
display_internal_t display_internal;
};
// Enumerate all monitors advertised by the compositor on the given display.
std::vector<std::unique_ptr<monitor_t>>
monitors(const char *display_name = nullptr);
// Initialize global Wayland support.  NOTE(review): the non-Wayland stub
// returns -1 on failure, so the same convention presumably applies here.
int
init();
} // namespace wl
#else
struct wl_output;
struct zxdg_output_manager_v1;
// Fallback used when Sunshine is built without Wayland support.  It mirrors
// the real wl::monitor_t interface so callers compile unchanged, but every
// operation is a no-op.
namespace wl {
class monitor_t {
public:
monitor_t(monitor_t &&) = delete;
monitor_t(const monitor_t &) = delete;
monitor_t &
operator=(const monitor_t &) = delete;
monitor_t &
operator=(monitor_t &&) = delete;
monitor_t(wl_output *output);
void
listen(zxdg_output_manager_v1 *output_manager);
wl_output *output;
std::string name;
std::string description;
platf::touch_port_t viewport;
};
// Always empty: no Wayland support in this build.
inline std::vector<std::unique_ptr<monitor_t>>
monitors(const char *display_name = nullptr) { return {}; }
// Always fails: no Wayland support in this build.
inline int
init() { return -1; }
} // namespace wl
#endif
| 6,212
|
C++
|
.h
| 209
| 24.789474
| 105
| 0.654662
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,072
|
graphics.h
|
LizardByte_Sunshine/src/platform/linux/graphics.h
|
/**
* @file src/platform/linux/graphics.h
* @brief Declarations for graphics related functions.
*/
#pragma once
#include <optional>
#include <string_view>
#include <glad/egl.h>
#include <glad/gl.h>
#include "misc.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
#include "src/video_colorspace.h"
#define SUNSHINE_STRINGIFY_HELPER(x) #x
#define SUNSHINE_STRINGIFY(x) SUNSHINE_STRINGIFY_HELPER(x)
#define gl_drain_errors_helper(x) gl::drain_errors(x)
#define gl_drain_errors gl_drain_errors_helper(__FILE__ ":" SUNSHINE_STRINGIFY(__LINE__))
extern "C" int
close(int __fd);
// X11 Display
extern "C" struct _XDisplay;
struct AVFrame;
void
free_frame(AVFrame *frame);
using frame_t = util::safe_ptr<AVFrame, free_frame>;
namespace gl {
extern GladGLContext ctx;
void
drain_errors(const std::string_view &prefix);
// Move-only owner of a set of OpenGL texture handles; the out-of-line
// destructor is expected to release them.
class tex_t: public util::buffer_t<GLuint> {
  using util::buffer_t<GLuint>::buffer_t;

public:
  tex_t(tex_t &&) = default;
  tex_t &operator=(tex_t &&) = default;

  ~tex_t();

  // Generate `count` texture handles.
  static tex_t make(std::size_t count);
};
/**
 * @brief Move-only owner of a set of OpenGL framebuffer handles.
 */
class frame_buf_t: public util::buffer_t<GLuint> {
  using util::buffer_t<GLuint>::buffer_t;

public:
  frame_buf_t(frame_buf_t &&) = default;
  frame_buf_t &
  operator=(frame_buf_t &&) = default;

  ~frame_buf_t();

  static frame_buf_t
  make(std::size_t count);

  // Detach the color attachment (texture 0) from every framebuffer in this set.
  inline void
  bind(std::nullptr_t, std::nullptr_t) {
    int x = 0;
    for (auto fb : (*this)) {
      ctx.BindFramebuffer(GL_FRAMEBUFFER, fb);
      ctx.FramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + x, 0, 0);
      ++x;
    }
  }

  // Attach one texture per framebuffer, in iteration order.
  // [it_begin, it_end) must not contain more textures than there are framebuffers.
  template <class It>
  void
  bind(It it_begin, It it_end) {
    using namespace std::literals;
    // std::distance() yields a signed value; cast size() so the comparison
    // is not done with mixed signedness.
    if (std::distance(it_begin, it_end) > static_cast<std::ptrdiff_t>(size())) {
      BOOST_LOG(warning) << "Too many elements to bind"sv;
      return;
    }
    int x = 0;
    std::for_each(it_begin, it_end, [&](auto tex) {
      ctx.BindFramebuffer(GL_FRAMEBUFFER, (*this)[x]);
      ctx.BindTexture(GL_TEXTURE_2D, tex);
      ctx.FramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + x, tex, 0);
      ++x;
    });
  }

  /**
   * @brief Copy a part of the framebuffer to a texture.
   */
  void
  copy(int id, int texture, int offset_x, int offset_y, int width, int height);
};
// Owns one compiled OpenGL shader object; deleted on destruction via the
// move-wrapper below.
class shader_t {
  KITTY_USING_MOVE_T(shader_internal_t, GLuint, std::numeric_limits<GLuint>::max(), {
    if (el != std::numeric_limits<GLuint>::max()) {
      ctx.DeleteShader(el);
    }
  });

public:
  // Human-readable compile log for this shader.
  std::string err_str();

  // Compile `source` as a shader of the given GL type; on failure the
  // error string is returned instead of a shader.
  static util::Either<shader_t, std::string> compile(const std::string_view &source, GLenum type);

  GLuint handle() const;

private:
  shader_internal_t _shader;
};
// Owns one OpenGL buffer object (used for uniform blocks); deleted on
// destruction via the move-wrapper below.
class buffer_t {
  KITTY_USING_MOVE_T(buffer_internal_t, GLuint, std::numeric_limits<GLuint>::max(), {
    if (el != std::numeric_limits<GLuint>::max()) {
      ctx.DeleteBuffers(1, &el);
    }
  });

public:
  // Create a buffer for the named block, seeded with `data`; `offsets`
  // maps member indices to byte offsets inside the block.
  static buffer_t make(util::buffer_t<GLint> &&offsets, const char *block, const std::string_view &data);

  GLuint handle() const;

  // Name of the uniform block this buffer backs.
  const char *block() const;

  // Overwrite buffer contents starting at `offset`.
  void update(const std::string_view &view, std::size_t offset = 0);
  void update(std::string_view *members, std::size_t count, std::size_t offset = 0);

private:
  const char *_block;
  std::size_t _size;
  util::buffer_t<GLint> _offsets;
  buffer_internal_t _buffer;
};
// Owns one linked OpenGL program object; deleted on destruction via the
// move-wrapper below.
class program_t {
  KITTY_USING_MOVE_T(program_internal_t, GLuint, std::numeric_limits<GLuint>::max(), {
    if (el != std::numeric_limits<GLuint>::max()) {
      ctx.DeleteProgram(el);
    }
  });

public:
  // Human-readable link log for this program.
  std::string err_str();

  // Link a vertex and fragment shader; on failure the error string is
  // returned instead of a program.
  static util::Either<program_t, std::string> link(const shader_t &vert, const shader_t &frag);

  // Bind a uniform buffer to this program.
  void bind(const buffer_t &buffer);

  // Create a uniform buffer for the named block from (name, data) pairs.
  std::optional<buffer_t> uniform(const char *block, std::pair<const char *, std::string_view> *members, std::size_t count);

  GLuint handle() const;

private:
  program_internal_t _program;
};
} // namespace gl
namespace gbm {
struct device;
// Function-pointer types matching the libgbm entry points.  NOTE(review):
// the extern pointers below suggest the symbols are resolved at runtime
// (see dyn::load in misc.h) rather than linked directly -- confirm.
typedef void (*device_destroy_fn)(device *gbm);
typedef device *(*create_device_fn)(int fd);
extern device_destroy_fn device_destroy;
extern create_device_fn create_device;
using gbm_t = util::dyn_safe_ptr<device, &device_destroy>;
// Initialize gbm support; NOTE(review): sibling init() functions return
// -1 on failure -- confirm the same convention applies here.
int
init();
} // namespace gbm
namespace egl {
using display_t = util::dyn_safe_ptr_v2<void, EGLBoolean, &eglTerminate>;
struct rgb_img_t {
display_t::pointer display;
EGLImage xrgb8;
gl::tex_t tex;
};
struct nv12_img_t {
display_t::pointer display;
EGLImage r8;
EGLImage bg88;
gl::tex_t tex;
gl::frame_buf_t buf;
// sizeof(va::DRMPRIMESurfaceDescriptor::objects) / sizeof(va::DRMPRIMESurfaceDescriptor::objects[0]);
static constexpr std::size_t num_fds = 4;
std::array<file_t, num_fds> fds;
};
KITTY_USING_MOVE_T(rgb_t, rgb_img_t, , {
if (el.xrgb8) {
eglDestroyImage(el.display, el.xrgb8);
}
});
KITTY_USING_MOVE_T(nv12_t, nv12_img_t, , {
if (el.r8) {
eglDestroyImage(el.display, el.r8);
}
if (el.bg88) {
eglDestroyImage(el.display, el.bg88);
}
});
KITTY_USING_MOVE_T(ctx_t, (std::tuple<display_t::pointer, EGLContext>), , {
TUPLE_2D_REF(disp, ctx, el);
if (ctx) {
eglMakeCurrent(disp, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
eglDestroyContext(disp, ctx);
}
});
// Describes a dmabuf-backed surface: per-plane buffer fds plus the DRM
// format information needed to import the surface with EGL.
struct surface_descriptor_t {
int width;
int height;
// One dmabuf file descriptor per plane; unused entries are expected to
// hold -1 (see img_descriptor_t::reset()).
int fds[4];
// DRM fourcc format code.
std::uint32_t fourcc;
std::uint64_t modifier;
std::uint32_t pitches[4];
std::uint32_t offsets[4];
};
display_t
make_display(std::variant<gbm::gbm_t::pointer, wl_display *, _XDisplay *> native_display);
std::optional<ctx_t>
make_ctx(display_t::pointer display);
std::optional<rgb_t>
import_source(
display_t::pointer egl_display,
const surface_descriptor_t &xrgb);
rgb_t
create_blank(platf::img_t &img);
std::optional<nv12_t>
import_target(
display_t::pointer egl_display,
std::array<file_t, nv12_img_t::num_fds> &&fds,
const surface_descriptor_t &y, const surface_descriptor_t &uv);
/**
* @brief Creates biplanar YUV textures to render into.
* @param width Width of the target frame.
* @param height Height of the target frame.
* @param format Format of the target frame.
* @return The new RGB texture.
*/
std::optional<nv12_t>
create_target(int width, int height, AVPixelFormat format);
class cursor_t: public platf::img_t {
public:
int x, y;
int src_w, src_h;
unsigned long serial;
std::vector<std::uint8_t> buffer;
};
// Allow cursor and the underlying image to be kept together
class img_descriptor_t: public cursor_t {
public:
  ~img_descriptor_t() {
    reset();
  }

  // Close every dmabuf fd owned by the descriptor and mark it closed (-1).
  // Safe to call more than once.
  void
  reset() {
    // Iterate the array itself rather than hard-coding its length, so the
    // loop cannot fall out of sync with surface_descriptor_t::fds.
    for (auto &fd : sd.fds) {
      if (fd >= 0) {
        close(fd);
        fd = -1;
      }
    }
  }

  surface_descriptor_t sd;

  // Increment sequence when new rgb_t needs to be created
  std::uint64_t sequence;
};
// GL-based color converter: renders the captured RGB image (plus cursor)
// into NV12 planes via the shader programs below.
class sws_t {
public:
static std::optional<sws_t>
make(int in_width, int in_height, int out_width, int out_height, gl::tex_t &&tex);
static std::optional<sws_t>
make(int in_width, int in_height, int out_width, int out_height, AVPixelFormat format);
// Convert the loaded image into the first two framebuffers
int
convert(gl::frame_buf_t &fb);
// Make an area of the image black
int
blank(gl::frame_buf_t &fb, int offsetX, int offsetY, int width, int height);
// Upload a CPU-side image into the monitor texture.
void
load_ram(platf::img_t &img);
// Take a GPU-side (dmabuf) image; `texture` selects the source texture.
void
load_vram(img_descriptor_t &img, int offset_x, int offset_y, int texture);
// Update the color-conversion matrix for the given colorspace.
void
apply_colorspace(const video::sunshine_colorspace_t &colorspace);
// The first texture is the monitor image.
// The second texture is the cursor image
gl::tex_t tex;
// The cursor image will be blended into this framebuffer
gl::frame_buf_t cursor_framebuffer;
gl::frame_buf_t copy_framebuffer;
// Y - shader, UV - shader, Cursor - shader
gl::program_t program[3];
gl::buffer_t color_matrix;
int out_width, out_height;
int in_width, in_height;
int offsetX, offsetY;
// Pointer to the texture to be converted to nv12
int loaded_texture;
// Store latest cursor for load_vram
std::uint64_t serial;
};
bool
fail();
} // namespace egl
| 8,578
|
C++
|
.h
| 285
| 25.308772
| 106
| 0.648062
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,073
|
cuda.h
|
LizardByte_Sunshine/src/platform/linux/cuda.h
|
/**
* @file src/platform/linux/cuda.h
* @brief Definitions for CUDA implementation.
*/
#pragma once
#if defined(SUNSHINE_BUILD_CUDA)
#include "src/video_colorspace.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
namespace platf {
class avcodec_encode_device_t;
class img_t;
} // namespace platf
namespace cuda {
namespace nvfbc {
std::vector<std::string>
display_names();
}
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(int width, int height, bool vram);
/**
* @brief Create a GL->CUDA encoding device for consuming captured dmabufs.
* @param in_width Width of captured frames.
* @param in_height Height of captured frames.
* @param offset_x Offset of content in captured frame.
* @param offset_y Offset of content in captured frame.
* @return FFmpeg encoding device context.
*/
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_gl_encode_device(int width, int height, int offset_x, int offset_y);
int
init();
} // namespace cuda
typedef struct cudaArray *cudaArray_t;
#if !defined(__CUDACC__)
typedef struct CUstream_st *cudaStream_t;
typedef unsigned long long cudaTextureObject_t;
#else /* defined(__CUDACC__) */
typedef __location__(device_builtin) struct CUstream_st *cudaStream_t;
typedef __location__(device_builtin) unsigned long long cudaTextureObject_t;
#endif /* !defined(__CUDACC__) */
namespace cuda {
class freeCudaPtr_t {
public:
void
operator()(void *ptr);
};
class freeCudaStream_t {
public:
void
operator()(cudaStream_t ptr);
};
using ptr_t = std::unique_ptr<void, freeCudaPtr_t>;
using stream_t = std::unique_ptr<CUstream_st, freeCudaStream_t>;
stream_t
make_stream(int flags = 0);
struct viewport_t {
int width, height;
int offsetX, offsetY;
};
class tex_t {
public:
static std::optional<tex_t>
make(int height, int pitch);
tex_t();
tex_t(tex_t &&);
tex_t &
operator=(tex_t &&other);
~tex_t();
int
copy(std::uint8_t *src, int height, int pitch);
cudaArray_t array;
struct texture {
cudaTextureObject_t point;
cudaTextureObject_t linear;
} texture;
};
// CUDA-based color converter: turns a captured RGB image into NV12 planes.
class sws_t {
public:
sws_t() = default;
sws_t(int in_width, int in_height, int out_width, int out_height, int pitch, int threadsPerBlock, ptr_t &&color_matrix);
/**
 * @brief Create a CUDA color converter.
 * @param in_width Width of the captured image in pixels.
 * @param in_height Height of the captured image in pixels.
 * @param out_width Width of the NV12 image in pixels.
 * @param out_height Height of the NV12 image in pixels.
 * @param pitch The size of a single row of pixels in bytes.
 */
static std::optional<sws_t>
make(int in_width, int in_height, int out_width, int out_height, int pitch);
// Converts loaded image into a CUDevicePtr
int
convert(std::uint8_t *Y, std::uint8_t *UV, std::uint32_t pitchY, std::uint32_t pitchUV, cudaTextureObject_t texture, stream_t::pointer stream);
// Same as above, but restricted to the given viewport.
int
convert(std::uint8_t *Y, std::uint8_t *UV, std::uint32_t pitchY, std::uint32_t pitchUV, cudaTextureObject_t texture, stream_t::pointer stream, const viewport_t &viewport);
// Update the color-conversion matrix for the given colorspace.
void
apply_colorspace(const video::sunshine_colorspace_t &colorspace);
// Upload a CPU-side image into the given CUDA array.
int
load_ram(platf::img_t &img, cudaArray_t array);
ptr_t color_matrix;
int threadsPerBlock;
viewport_t viewport;
float scale;
};
} // namespace cuda
#endif
| 3,464
|
C++
|
.h
| 108
| 28.101852
| 175
| 0.69284
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,074
|
misc.h
|
LizardByte_Sunshine/src/platform/linux/misc.h
|
/**
* @file src/platform/linux/misc.h
* @brief Miscellaneous declarations for Linux.
*/
#pragma once
#include <unistd.h>
#include <vector>
#include "src/utility.h"
KITTY_USING_MOVE_T(file_t, int, -1, {
if (el >= 0) {
close(el);
}
});
enum class window_system_e {
NONE, ///< No window system
X11, ///< X11
WAYLAND, ///< Wayland
};
extern window_system_e window_system;
namespace dyn {
typedef void (*apiproc)(void);
int
load(void *handle, const std::vector<std::tuple<apiproc *, const char *>> &funcs, bool strict = true);
void *
handle(const std::vector<const char *> &libs);
} // namespace dyn
| 634
|
C++
|
.h
| 26
| 22.038462
| 104
| 0.67
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,075
|
x11grab.h
|
LizardByte_Sunshine/src/platform/linux/x11grab.h
|
/**
* @file src/platform/linux/x11grab.h
* @brief Declarations for x11 capture.
*/
#pragma once
#include <optional>
#include "src/platform/common.h"
#include "src/utility.h"
// X11 Display
extern "C" struct _XDisplay;
namespace egl {
class cursor_t;
}
namespace platf::x11 {
struct cursor_ctx_raw_t;
void
freeCursorCtx(cursor_ctx_raw_t *ctx);
void
freeDisplay(_XDisplay *xdisplay);
using cursor_ctx_t = util::safe_ptr<cursor_ctx_raw_t, freeCursorCtx>;
using xdisplay_t = util::safe_ptr<_XDisplay, freeDisplay>;
// Captures the X11 cursor image and can blend it into a captured frame.
class cursor_t {
public:
static std::optional<cursor_t>
make();
// Capture the current cursor image into `img`.
void
capture(egl::cursor_t &img);
/**
 * Capture and blend the cursor into the image
 *
 * img <-- destination image
 * offsetX, offsetY <--- Top left corner of the virtual screen
 */
void
blend(img_t &img, int offsetX, int offsetY);
// Opaque cursor-capture state (see cursor_ctx_raw_t / freeCursorCtx).
cursor_ctx_t ctx;
};
xdisplay_t
make_display();
} // namespace platf::x11
| 976
|
C++
|
.h
| 40
| 20.975
| 71
| 0.684324
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,076
|
vaapi.h
|
LizardByte_Sunshine/src/platform/linux/vaapi.h
|
/**
* @file src/platform/linux/vaapi.h
* @brief Declarations for VA-API hardware accelerated capture.
*/
#pragma once
#include "misc.h"
#include "src/platform/common.h"
namespace egl {
struct surface_descriptor_t;
}
namespace va {
/**
* Width --> Width of the image
* Height --> Height of the image
* offset_x --> Horizontal offset of the image in the texture
* offset_y --> Vertical offset of the image in the texture
* file_t card --> The file descriptor of the render device used for encoding
*/
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(int width, int height, bool vram);
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(int width, int height, int offset_x, int offset_y, bool vram);
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(int width, int height, file_t &&card, int offset_x, int offset_y, bool vram);
// Ensure the render device pointed to by fd is capable of encoding h264 with the hevc_mode configured
bool
validate(int fd);
} // namespace va
| 1,091
|
C++
|
.h
| 28
| 36.321429
| 106
| 0.729245
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,077
|
inputtino_touch.h
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_touch.h
|
/**
* @file src/platform/linux/input/inputtino_touch.h
* @brief Declarations for inputtino touch input handling.
*/
#pragma once
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/platform/common.h"
#include "inputtino_common.h"
using namespace std::literals;
namespace platf::touch {
void
update(client_input_raw_t *raw, const touch_port_t &touch_port, const touch_input_t &touch);
}
| 450
|
C++
|
.h
| 15
| 28.2
| 94
| 0.769767
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,078
|
inputtino_keyboard.h
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_keyboard.h
|
/**
* @file src/platform/linux/input/inputtino_keyboard.h
* @brief Declarations for inputtino keyboard input handling.
*/
#pragma once
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "inputtino_common.h"
using namespace std::literals;
namespace platf::keyboard {
void
update(input_raw_t *raw, uint16_t modcode, bool release, uint8_t flags);
void
unicode(input_raw_t *raw, char *utf8, int size);
} // namespace platf::keyboard
| 494
|
C++
|
.h
| 16
| 28.875
| 74
| 0.761099
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,079
|
inputtino_mouse.h
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_mouse.h
|
/**
* @file src/platform/linux/input/inputtino_mouse.h
* @brief Declarations for inputtino mouse input handling.
*/
#pragma once
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/platform/common.h"
#include "inputtino_common.h"
using namespace std::literals;
namespace platf::mouse {
void
move(input_raw_t *raw, int deltaX, int deltaY);
void
move_abs(input_raw_t *raw, const touch_port_t &touch_port, float x, float y);
void
button(input_raw_t *raw, int button, bool release);
void
scroll(input_raw_t *raw, int high_res_distance);
void
hscroll(input_raw_t *raw, int high_res_distance);
util::point_t
get_location(input_raw_t *raw);
} // namespace platf::mouse
| 752
|
C++
|
.h
| 25
| 27.6
| 79
| 0.739191
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,080
|
inputtino_pen.h
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_pen.h
|
/**
* @file src/platform/linux/input/inputtino_pen.h
* @brief Declarations for inputtino pen input handling.
*/
#pragma once
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/platform/common.h"
#include "inputtino_common.h"
using namespace std::literals;
namespace platf::pen {
void
update(client_input_raw_t *raw, const touch_port_t &touch_port, const pen_input_t &pen);
}
| 440
|
C++
|
.h
| 15
| 27.533333
| 90
| 0.764286
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,081
|
inputtino_gamepad.h
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_gamepad.h
|
/**
* @file src/platform/linux/input/inputtino_gamepad.h
* @brief Declarations for inputtino gamepad input handling.
*/
#pragma once
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/platform/common.h"
#include "inputtino_common.h"
using namespace std::literals;
namespace platf::gamepad {
enum ControllerType {
XboxOneWired, ///< Xbox One Wired Controller
DualSenseWired, ///< DualSense Wired Controller
SwitchProWired ///< Switch Pro Wired Controller
};
int
alloc(input_raw_t *raw, const gamepad_id_t &id, const gamepad_arrival_t &metadata, feedback_queue_t feedback_queue);
void
free(input_raw_t *raw, int nr);
void
update(input_raw_t *raw, int nr, const gamepad_state_t &gamepad_state);
void
touch(input_raw_t *raw, const gamepad_touch_t &touch);
void
motion(input_raw_t *raw, const gamepad_motion_t &motion);
void
battery(input_raw_t *raw, const gamepad_battery_t &battery);
std::vector<supported_gamepad_t> &
supported_gamepads(input_t *input);
} // namespace platf::gamepad
| 1,099
|
C++
|
.h
| 32
| 31.46875
| 118
| 0.741935
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,082
|
inputtino_common.h
|
LizardByte_Sunshine/src/platform/linux/input/inputtino_common.h
|
/**
* @file src/platform/linux/input/inputtino_common.h
* @brief Declarations for inputtino common input handling.
*/
#pragma once
#include <boost/locale.hpp>
#include <inputtino/input.hpp>
#include <libevdev/libevdev.h>
#include "src/config.h"
#include "src/logging.h"
#include "src/platform/common.h"
#include "src/utility.h"
using namespace std::literals;
namespace platf {
using joypads_t = std::variant<inputtino::XboxOneJoypad, inputtino::SwitchJoypad, inputtino::PS5Joypad>;
struct joypad_state {
std::unique_ptr<joypads_t> joypad;
gamepad_feedback_msg_t last_rumble;
gamepad_feedback_msg_t last_rgb_led;
};
// Owns the virtual input devices shared by all connected clients: one
// mouse, one keyboard, and a fixed-size pool of gamepad slots.
struct input_raw_t {
input_raw_t():
mouse(inputtino::Mouse::create({
.name = "Mouse passthrough",
.vendor_id = 0xBEEF,
.product_id = 0xDEAD,
.version = 0x111,
})),
keyboard(inputtino::Keyboard::create({
.name = "Keyboard passthrough",
.vendor_id = 0xBEEF,
.product_id = 0xDEAD,
.version = 0x111,
})),
gamepads(MAX_GAMEPADS) {
// Device creation can fail (e.g. udev permission denied); log a warning
// and continue so the rest of the session still works.
if (!mouse) {
BOOST_LOG(warning) << "Unable to create virtual mouse: " << mouse.getErrorMessage();
}
if (!keyboard) {
BOOST_LOG(warning) << "Unable to create virtual keyboard: " << keyboard.getErrorMessage();
}
}
~input_raw_t() = default;
// All devices are wrapped in Result because it might be that we aren't able to create them (ex: udev permission denied)
inputtino::Result<inputtino::Mouse> mouse;
inputtino::Result<inputtino::Keyboard> keyboard;
/**
 * A list of gamepads that are currently connected.
 * The pointer is shared because that state will be shared with background threads that deal with rumble and LED
 */
std::vector<std::shared_ptr<joypad_state>> gamepads;
};
// Per-client input context: touch and pen devices are created per client,
// while mouse/keyboard/gamepads are shared via the global input_raw_t.
struct client_input_raw_t: public client_input_t {
client_input_raw_t(input_t &input):
touch(inputtino::TouchScreen::create({
.name = "Touch passthrough",
.vendor_id = 0xBEEF,
.product_id = 0xDEAD,
.version = 0x111,
})),
pen(inputtino::PenTablet::create({
.name = "Pen passthrough",
.vendor_id = 0xBEEF,
.product_id = 0xDEAD,
.version = 0x111,
})) {
global = (input_raw_t *) input.get();
// Device creation can fail (e.g. udev permission denied); log a warning
// and continue -- other input kinds keep working.
if (!touch) {
BOOST_LOG(warning) << "Unable to create virtual touch screen: " << touch.getErrorMessage();
}
if (!pen) {
BOOST_LOG(warning) << "Unable to create virtual pen tablet: " << pen.getErrorMessage();
}
}
// Shared global input state; not owned by this struct.
input_raw_t *global;
// Device state and handles for pen and touch input must be stored in the per-client
// input context, because each connected client may be sending their own independent
// pen/touch events. To maintain separation, we expose separate pen and touch devices
// for each client.
inputtino::Result<inputtino::TouchScreen> touch;
inputtino::Result<inputtino::PenTablet> pen;
};
/**
 * @brief Convert an angle from degrees to radians.
 * @param degree Angle in degrees.
 * @return The same angle expressed in radians.
 */
inline float
deg2rad(float degree) {
  // 180.f converts exactly to 180.0, so this constant equals M_PI / 180.0
  // and the arithmetic matches the original double-precision expression.
  constexpr double degrees_to_radians = M_PI / 180.f;
  return degree * degrees_to_radians;
}
} // namespace platf
| 3,160
|
C++
|
.h
| 87
| 30.103448
| 124
| 0.644001
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,083
|
av_img_t.h
|
LizardByte_Sunshine/src/platform/macos/av_img_t.h
|
/**
* @file src/platform/macos/av_img_t.h
* @brief Declarations for AV image types on macOS.
*/
#pragma once
#include "src/platform/common.h"
#include <CoreMedia/CoreMedia.h>
#include <CoreVideo/CoreVideo.h>
namespace platf {
// RAII holder for a CMSampleBufferRef: retains the buffer on construction
// and releases it on destruction.
struct av_sample_buf_t {
  CMSampleBufferRef buf;

  explicit av_sample_buf_t(CMSampleBufferRef buf):
      buf((CMSampleBufferRef) CFRetain(buf)) {}

  ~av_sample_buf_t() {
    if (buf == nullptr) {
      return;
    }
    CFRelease(buf);
  }
};
// Borrows the pixel buffer inside a CMSampleBuffer (get-rule: not retained)
// and keeps its base address locked for CPU reads for the holder's lifetime.
struct av_pixel_buf_t {
  CVPixelBufferRef buf;

  // Constructor
  explicit av_pixel_buf_t(CMSampleBufferRef sb):
    buf(
      CMSampleBufferGetImageBuffer(sb)) {
    // CMSampleBufferGetImageBuffer() may return null; guard the lock the
    // same way the destructor already guards the unlock.
    if (buf != nullptr) {
      CVPixelBufferLockBaseAddress(buf, kCVPixelBufferLock_ReadOnly);
    }
  }

  // Base address of the locked pixel data.
  // NOTE(review): assumes buf is non-null -- callers should check before use.
  [[nodiscard]] uint8_t *
  data() const {
    return static_cast<uint8_t *>(CVPixelBufferGetBaseAddress(buf));
  }

  // Destructor
  ~av_pixel_buf_t() {
    if (buf != nullptr) {
      CVPixelBufferUnlockBaseAddress(buf, kCVPixelBufferLock_ReadOnly);
    }
  }
};
struct av_img_t: img_t {
std::shared_ptr<av_sample_buf_t> sample_buffer;
std::shared_ptr<av_pixel_buf_t> pixel_buffer;
};
struct temp_retain_av_img_t {
std::shared_ptr<av_sample_buf_t> sample_buffer;
std::shared_ptr<av_pixel_buf_t> pixel_buffer;
uint8_t *data;
temp_retain_av_img_t(
std::shared_ptr<av_sample_buf_t> sb,
std::shared_ptr<av_pixel_buf_t> pb,
uint8_t *dt):
sample_buffer(std::move(sb)),
pixel_buffer(std::move(pb)), data(dt) {}
};
} // namespace platf
| 1,570
|
C++
|
.h
| 54
| 23.944444
| 73
| 0.650931
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,084
|
av_video.h
|
LizardByte_Sunshine/src/platform/macos/av_video.h
|
/**
* @file src/platform/macos/av_video.h
* @brief Declarations for video capture on macOS.
*/
#pragma once
#import <AVFoundation/AVFoundation.h>
#import <AppKit/AppKit.h>
struct CaptureSession {
AVCaptureVideoDataOutput *output;
NSCondition *captureStopped;
};
@interface AVVideo: NSObject <AVCaptureVideoDataOutputSampleBufferDelegate>
#define kMaxDisplays 32
@property (nonatomic, assign) CGDirectDisplayID displayID;
@property (nonatomic, assign) CMTime minFrameDuration;
@property (nonatomic, assign) OSType pixelFormat;
@property (nonatomic, assign) int frameWidth;
@property (nonatomic, assign) int frameHeight;
typedef bool (^FrameCallbackBlock)(CMSampleBufferRef);
@property (nonatomic, assign) AVCaptureSession *session;
@property (nonatomic, assign) NSMapTable<AVCaptureConnection *, AVCaptureVideoDataOutput *> *videoOutputs;
@property (nonatomic, assign) NSMapTable<AVCaptureConnection *, FrameCallbackBlock> *captureCallbacks;
@property (nonatomic, assign) NSMapTable<AVCaptureConnection *, dispatch_semaphore_t> *captureSignals;
+ (NSArray<NSDictionary *> *)displayNames;
+ (NSString *)getDisplayName:(CGDirectDisplayID)displayID;
- (id)initWithDisplay:(CGDirectDisplayID)displayID frameRate:(int)frameRate;
- (void)setFrameWidth:(int)frameWidth frameHeight:(int)frameHeight;
- (dispatch_semaphore_t)capture:(FrameCallbackBlock)frameCallback;
@end
| 1,381
|
C++
|
.h
| 29
| 46
| 106
| 0.823266
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,085
|
misc.h
|
LizardByte_Sunshine/src/platform/macos/misc.h
|
/**
* @file src/platform/macos/misc.h
* @brief Miscellaneous declarations for macOS platform.
*/
#pragma once
#include <vector>
#include <CoreGraphics/CoreGraphics.h>
namespace platf {
bool
is_screen_capture_allowed();
}
namespace dyn {
typedef void (*apiproc)();
int
load(void *handle, const std::vector<std::tuple<apiproc *, const char *>> &funcs, bool strict = true);
void *
handle(const std::vector<const char *> &libs);
} // namespace dyn
| 468
|
C++
|
.h
| 18
| 23.722222
| 104
| 0.718468
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,086
|
nv12_zero_device.h
|
LizardByte_Sunshine/src/platform/macos/nv12_zero_device.h
|
/**
* @file src/platform/macos/nv12_zero_device.h
* @brief Declarations for NV12 zero copy device on macOS.
*/
#pragma once
#include "src/platform/common.h"
struct AVFrame;
namespace platf {
void
free_frame(AVFrame *frame);
class nv12_zero_device: public avcodec_encode_device_t {
// display holds a pointer to an av_video object. Since the namespaces of AVFoundation
// and FFMPEG collide, we need this opaque pointer and cannot use the definition
void *display;
public:
// this function is used to set the resolution on an av_video object that we cannot
// call directly because of namespace collisions between AVFoundation and FFMPEG
using resolution_fn_t = std::function<void(void *display, int width, int height)>;
resolution_fn_t resolution_fn;
using pixel_format_fn_t = std::function<void(void *display, int pixelFormat)>;
int
init(void *display, pix_fmt_e pix_fmt, resolution_fn_t resolution_fn, const pixel_format_fn_t &pixel_format_fn);
int
convert(img_t &img) override;
int
set_frame(AVFrame *frame, AVBufferRef *hw_frames_ctx) override;
private:
util::safe_ptr<AVFrame, free_frame> av_frame;
};
} // namespace platf
| 1,213
|
C++
|
.h
| 30
| 36.633333
| 116
| 0.73339
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,087
|
av_audio.h
|
LizardByte_Sunshine/src/platform/macos/av_audio.h
|
/**
* @file src/platform/macos/av_audio.h
* @brief Declarations for audio capture on macOS.
*/
#pragma once
#import <AVFoundation/AVFoundation.h>
#include "third-party/TPCircularBuffer/TPCircularBuffer.h"
#define kBufferLength 4096
@interface AVAudio: NSObject <AVCaptureAudioDataOutputSampleBufferDelegate> {
@public
TPCircularBuffer audioSampleBuffer;
}
@property (nonatomic, assign) AVCaptureSession *audioCaptureSession;
@property (nonatomic, assign) AVCaptureConnection *audioConnection;
@property (nonatomic, assign) NSCondition *samplesArrivedSignal;
+ (NSArray *)microphoneNames;
+ (AVCaptureDevice *)findMicrophone:(NSString *)name;
- (int)setupMicrophone:(AVCaptureDevice *)device sampleRate:(UInt32)sampleRate frameSize:(UInt32)frameSize channels:(UInt8)channels;
@end
| 793
|
C++
|
.h
| 19
| 40.052632
| 132
| 0.826371
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,088
|
keylayout.h
|
LizardByte_Sunshine/src/platform/windows/keylayout.h
|
/**
* @file src/platform/windows/keylayout.h
* @brief Keyboard layout mapping for scancode translation
*/
#pragma once
#include <array>
#include <cstdint>
namespace platf {
// Virtual Key to Scan Code mapping for the US English layout (00000409).
// GameStream uses this as the canonical key layout for scancode conversion.
constexpr std::array<std::uint8_t, std::numeric_limits<std::uint8_t>::max() + 1> VK_TO_SCANCODE_MAP {
0, /* 0x00 */
0, /* 0x01 */
0, /* 0x02 */
70, /* 0x03 */
0, /* 0x04 */
0, /* 0x05 */
0, /* 0x06 */
0, /* 0x07 */
14, /* 0x08 */
15, /* 0x09 */
0, /* 0x0a */
0, /* 0x0b */
76, /* 0x0c */
28, /* 0x0d */
0, /* 0x0e */
0, /* 0x0f */
42, /* 0x10 */
29, /* 0x11 */
56, /* 0x12 */
0, /* 0x13 */
58, /* 0x14 */
0, /* 0x15 */
0, /* 0x16 */
0, /* 0x17 */
0, /* 0x18 */
0, /* 0x19 */
0, /* 0x1a */
1, /* 0x1b */
0, /* 0x1c */
0, /* 0x1d */
0, /* 0x1e */
0, /* 0x1f */
57, /* 0x20 */
73, /* 0x21 */
81, /* 0x22 */
79, /* 0x23 */
71, /* 0x24 */
75, /* 0x25 */
72, /* 0x26 */
77, /* 0x27 */
80, /* 0x28 */
0, /* 0x29 */
0, /* 0x2a */
0, /* 0x2b */
84, /* 0x2c */
82, /* 0x2d */
83, /* 0x2e */
99, /* 0x2f */
11, /* 0x30 */
2, /* 0x31 */
3, /* 0x32 */
4, /* 0x33 */
5, /* 0x34 */
6, /* 0x35 */
7, /* 0x36 */
8, /* 0x37 */
9, /* 0x38 */
10, /* 0x39 */
0, /* 0x3a */
0, /* 0x3b */
0, /* 0x3c */
0, /* 0x3d */
0, /* 0x3e */
0, /* 0x3f */
0, /* 0x40 */
30, /* 0x41 */
48, /* 0x42 */
46, /* 0x43 */
32, /* 0x44 */
18, /* 0x45 */
33, /* 0x46 */
34, /* 0x47 */
35, /* 0x48 */
23, /* 0x49 */
36, /* 0x4a */
37, /* 0x4b */
38, /* 0x4c */
50, /* 0x4d */
49, /* 0x4e */
24, /* 0x4f */
25, /* 0x50 */
16, /* 0x51 */
19, /* 0x52 */
31, /* 0x53 */
20, /* 0x54 */
22, /* 0x55 */
47, /* 0x56 */
17, /* 0x57 */
45, /* 0x58 */
21, /* 0x59 */
44, /* 0x5a */
91, /* 0x5b */
92, /* 0x5c */
93, /* 0x5d */
0, /* 0x5e */
95, /* 0x5f */
82, /* 0x60 */
79, /* 0x61 */
80, /* 0x62 */
81, /* 0x63 */
75, /* 0x64 */
76, /* 0x65 */
77, /* 0x66 */
71, /* 0x67 */
72, /* 0x68 */
73, /* 0x69 */
55, /* 0x6a */
78, /* 0x6b */
0, /* 0x6c */
74, /* 0x6d */
83, /* 0x6e */
53, /* 0x6f */
59, /* 0x70 */
60, /* 0x71 */
61, /* 0x72 */
62, /* 0x73 */
63, /* 0x74 */
64, /* 0x75 */
65, /* 0x76 */
66, /* 0x77 */
67, /* 0x78 */
68, /* 0x79 */
87, /* 0x7a */
88, /* 0x7b */
100, /* 0x7c */
101, /* 0x7d */
102, /* 0x7e */
103, /* 0x7f */
104, /* 0x80 */
105, /* 0x81 */
106, /* 0x82 */
107, /* 0x83 */
108, /* 0x84 */
109, /* 0x85 */
110, /* 0x86 */
118, /* 0x87 */
0, /* 0x88 */
0, /* 0x89 */
0, /* 0x8a */
0, /* 0x8b */
0, /* 0x8c */
0, /* 0x8d */
0, /* 0x8e */
0, /* 0x8f */
69, /* 0x90 */
70, /* 0x91 */
0, /* 0x92 */
0, /* 0x93 */
0, /* 0x94 */
0, /* 0x95 */
0, /* 0x96 */
0, /* 0x97 */
0, /* 0x98 */
0, /* 0x99 */
0, /* 0x9a */
0, /* 0x9b */
0, /* 0x9c */
0, /* 0x9d */
0, /* 0x9e */
0, /* 0x9f */
42, /* 0xa0 */
54, /* 0xa1 */
29, /* 0xa2 */
29, /* 0xa3 */
56, /* 0xa4 */
56, /* 0xa5 */
106, /* 0xa6 */
105, /* 0xa7 */
103, /* 0xa8 */
104, /* 0xa9 */
101, /* 0xaa */
102, /* 0xab */
50, /* 0xac */
32, /* 0xad */
46, /* 0xae */
48, /* 0xaf */
25, /* 0xb0 */
16, /* 0xb1 */
36, /* 0xb2 */
34, /* 0xb3 */
108, /* 0xb4 */
109, /* 0xb5 */
107, /* 0xb6 */
33, /* 0xb7 */
0, /* 0xb8 */
0, /* 0xb9 */
39, /* 0xba */
13, /* 0xbb */
51, /* 0xbc */
12, /* 0xbd */
52, /* 0xbe */
53, /* 0xbf */
41, /* 0xc0 */
115, /* 0xc1 */
126, /* 0xc2 */
0, /* 0xc3 */
0, /* 0xc4 */
0, /* 0xc5 */
0, /* 0xc6 */
0, /* 0xc7 */
0, /* 0xc8 */
0, /* 0xc9 */
0, /* 0xca */
0, /* 0xcb */
0, /* 0xcc */
0, /* 0xcd */
0, /* 0xce */
0, /* 0xcf */
0, /* 0xd0 */
0, /* 0xd1 */
0, /* 0xd2 */
0, /* 0xd3 */
0, /* 0xd4 */
0, /* 0xd5 */
0, /* 0xd6 */
0, /* 0xd7 */
0, /* 0xd8 */
0, /* 0xd9 */
0, /* 0xda */
26, /* 0xdb */
43, /* 0xdc */
27, /* 0xdd */
40, /* 0xde */
0, /* 0xdf */
0, /* 0xe0 */
0, /* 0xe1 */
86, /* 0xe2 */
0, /* 0xe3 */
0, /* 0xe4 */
0, /* 0xe5 */
0, /* 0xe6 */
0, /* 0xe7 */
0, /* 0xe8 */
113, /* 0xe9 */
92, /* 0xea */
123, /* 0xeb */
0, /* 0xec */
111, /* 0xed */
90, /* 0xee */
0, /* 0xef */
0, /* 0xf0 */
91, /* 0xf1 */
0, /* 0xf2 */
95, /* 0xf3 */
0, /* 0xf4 */
94, /* 0xf5 */
0, /* 0xf6 */
0, /* 0xf7 */
0, /* 0xf8 */
93, /* 0xf9 */
0, /* 0xfa */
98, /* 0xfb */
0, /* 0xfc */
0, /* 0xfd */
0, /* 0xfe */
0, /* 0xff */
};
} // namespace platf
| 5,241
|
C++
|
.h
| 269
| 14.628253
| 103
| 0.35835
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,089
|
misc.h
|
LizardByte_Sunshine/src/platform/windows/misc.h
|
/**
* @file src/platform/windows/misc.h
* @brief Miscellaneous declarations for Windows.
*/
#pragma once
#include <chrono>
#include <string_view>
#include <windows.h>
#include <winnt.h>
namespace platf {
void
print_status(const std::string_view &prefix, HRESULT status);
HDESK
syncThreadDesktop();
int64_t
qpc_counter();
std::chrono::nanoseconds
qpc_time_difference(int64_t performance_counter1, int64_t performance_counter2);
/**
* @brief Convert a UTF-8 string into a UTF-16 wide string.
* @param string The UTF-8 string.
* @return The converted UTF-16 wide string.
*/
std::wstring
from_utf8(const std::string &string);
/**
* @brief Convert a UTF-16 wide string into a UTF-8 string.
* @param string The UTF-16 wide string.
* @return The converted UTF-8 string.
*/
std::string
to_utf8(const std::wstring &string);
} // namespace platf
| 898
|
C++
|
.h
| 33
| 24.363636
| 82
| 0.714785
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,090
|
display.h
|
LizardByte_Sunshine/src/platform/windows/display.h
|
/**
* @file src/platform/windows/display.h
* @brief Declarations for the Windows display backend.
*/
#pragma once
#include <d3d11.h>
#include <d3d11_4.h>
#include <d3dcommon.h>
#include <dwmapi.h>
#include <dxgi.h>
#include <dxgi1_6.h>
#include <Unknwn.h>
#include <winrt/Windows.Graphics.Capture.h>
#include "src/platform/common.h"
#include "src/utility.h"
#include "src/video.h"
namespace platf::dxgi {
extern const char *format_str[];
// Add D3D11_CREATE_DEVICE_DEBUG here to enable the D3D11 debug runtime.
// You should have a debugger like WinDbg attached to receive debug messages.
auto constexpr D3D11_CREATE_DEVICE_FLAGS = D3D11_CREATE_DEVICE_VIDEO_SUPPORT;
template <class T>
void
Release(T *dxgi) {
dxgi->Release();
}
using factory1_t = util::safe_ptr<IDXGIFactory1, Release<IDXGIFactory1>>;
using dxgi_t = util::safe_ptr<IDXGIDevice, Release<IDXGIDevice>>;
using dxgi1_t = util::safe_ptr<IDXGIDevice1, Release<IDXGIDevice1>>;
using device_t = util::safe_ptr<ID3D11Device, Release<ID3D11Device>>;
using device1_t = util::safe_ptr<ID3D11Device1, Release<ID3D11Device1>>;
using device_ctx_t = util::safe_ptr<ID3D11DeviceContext, Release<ID3D11DeviceContext>>;
using adapter_t = util::safe_ptr<IDXGIAdapter1, Release<IDXGIAdapter1>>;
using output_t = util::safe_ptr<IDXGIOutput, Release<IDXGIOutput>>;
using output1_t = util::safe_ptr<IDXGIOutput1, Release<IDXGIOutput1>>;
using output5_t = util::safe_ptr<IDXGIOutput5, Release<IDXGIOutput5>>;
using output6_t = util::safe_ptr<IDXGIOutput6, Release<IDXGIOutput6>>;
using dup_t = util::safe_ptr<IDXGIOutputDuplication, Release<IDXGIOutputDuplication>>;
using texture2d_t = util::safe_ptr<ID3D11Texture2D, Release<ID3D11Texture2D>>;
using texture1d_t = util::safe_ptr<ID3D11Texture1D, Release<ID3D11Texture1D>>;
using resource_t = util::safe_ptr<IDXGIResource, Release<IDXGIResource>>;
using resource1_t = util::safe_ptr<IDXGIResource1, Release<IDXGIResource1>>;
using multithread_t = util::safe_ptr<ID3D11Multithread, Release<ID3D11Multithread>>;
using vs_t = util::safe_ptr<ID3D11VertexShader, Release<ID3D11VertexShader>>;
using ps_t = util::safe_ptr<ID3D11PixelShader, Release<ID3D11PixelShader>>;
using blend_t = util::safe_ptr<ID3D11BlendState, Release<ID3D11BlendState>>;
using input_layout_t = util::safe_ptr<ID3D11InputLayout, Release<ID3D11InputLayout>>;
using render_target_t = util::safe_ptr<ID3D11RenderTargetView, Release<ID3D11RenderTargetView>>;
using shader_res_t = util::safe_ptr<ID3D11ShaderResourceView, Release<ID3D11ShaderResourceView>>;
using buf_t = util::safe_ptr<ID3D11Buffer, Release<ID3D11Buffer>>;
using raster_state_t = util::safe_ptr<ID3D11RasterizerState, Release<ID3D11RasterizerState>>;
using sampler_state_t = util::safe_ptr<ID3D11SamplerState, Release<ID3D11SamplerState>>;
using blob_t = util::safe_ptr<ID3DBlob, Release<ID3DBlob>>;
using depth_stencil_state_t = util::safe_ptr<ID3D11DepthStencilState, Release<ID3D11DepthStencilState>>;
using depth_stencil_view_t = util::safe_ptr<ID3D11DepthStencilView, Release<ID3D11DepthStencilView>>;
using keyed_mutex_t = util::safe_ptr<IDXGIKeyedMutex, Release<IDXGIKeyedMutex>>;
namespace video {
using device_t = util::safe_ptr<ID3D11VideoDevice, Release<ID3D11VideoDevice>>;
using ctx_t = util::safe_ptr<ID3D11VideoContext, Release<ID3D11VideoContext>>;
using processor_t = util::safe_ptr<ID3D11VideoProcessor, Release<ID3D11VideoProcessor>>;
using processor_out_t = util::safe_ptr<ID3D11VideoProcessorOutputView, Release<ID3D11VideoProcessorOutputView>>;
using processor_in_t = util::safe_ptr<ID3D11VideoProcessorInputView, Release<ID3D11VideoProcessorInputView>>;
using processor_enum_t = util::safe_ptr<ID3D11VideoProcessorEnumerator, Release<ID3D11VideoProcessorEnumerator>>;
} // namespace video
class hwdevice_t;
struct cursor_t {
std::vector<std::uint8_t> img_data;
DXGI_OUTDUPL_POINTER_SHAPE_INFO shape_info;
int x, y;
bool visible;
};
class gpu_cursor_t {
public:
gpu_cursor_t():
cursor_view { 0, 0, 0, 0, 0.0f, 1.0f } {};
void
set_pos(LONG topleft_x, LONG topleft_y, LONG display_width, LONG display_height, DXGI_MODE_ROTATION display_rotation, bool visible) {
this->topleft_x = topleft_x;
this->topleft_y = topleft_y;
this->display_width = display_width;
this->display_height = display_height;
this->display_rotation = display_rotation;
this->visible = visible;
update_viewport();
}
void
set_texture(LONG texture_width, LONG texture_height, texture2d_t &&texture) {
this->texture = std::move(texture);
this->texture_width = texture_width;
this->texture_height = texture_height;
update_viewport();
}
void
update_viewport() {
switch (display_rotation) {
case DXGI_MODE_ROTATION_UNSPECIFIED:
case DXGI_MODE_ROTATION_IDENTITY:
cursor_view.TopLeftX = topleft_x;
cursor_view.TopLeftY = topleft_y;
cursor_view.Width = texture_width;
cursor_view.Height = texture_height;
break;
case DXGI_MODE_ROTATION_ROTATE90:
cursor_view.TopLeftX = topleft_y;
cursor_view.TopLeftY = display_width - texture_width - topleft_x;
cursor_view.Width = texture_height;
cursor_view.Height = texture_width;
break;
case DXGI_MODE_ROTATION_ROTATE180:
cursor_view.TopLeftX = display_width - texture_width - topleft_x;
cursor_view.TopLeftY = display_height - texture_height - topleft_y;
cursor_view.Width = texture_width;
cursor_view.Height = texture_height;
break;
case DXGI_MODE_ROTATION_ROTATE270:
cursor_view.TopLeftX = display_height - texture_height - topleft_y;
cursor_view.TopLeftY = topleft_x;
cursor_view.Width = texture_height;
cursor_view.Height = texture_width;
break;
}
}
texture2d_t texture;
LONG texture_width;
LONG texture_height;
LONG topleft_x;
LONG topleft_y;
LONG display_width;
LONG display_height;
DXGI_MODE_ROTATION display_rotation;
shader_res_t input_res;
D3D11_VIEWPORT cursor_view;
bool visible;
};
class display_base_t: public display_t {
public:
int
init(const ::video::config_t &config, const std::string &display_name);
capture_e
capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override;
factory1_t factory;
adapter_t adapter;
output_t output;
device_t device;
device_ctx_t device_ctx;
DXGI_RATIONAL display_refresh_rate;
int display_refresh_rate_rounded;
DXGI_MODE_ROTATION display_rotation = DXGI_MODE_ROTATION_UNSPECIFIED;
int width_before_rotation;
int height_before_rotation;
int client_frame_rate;
DXGI_FORMAT capture_format;
D3D_FEATURE_LEVEL feature_level;
std::unique_ptr<high_precision_timer> timer = create_high_precision_timer();
typedef enum _D3DKMT_SCHEDULINGPRIORITYCLASS {
D3DKMT_SCHEDULINGPRIORITYCLASS_IDLE, ///< Idle priority class
D3DKMT_SCHEDULINGPRIORITYCLASS_BELOW_NORMAL, ///< Below normal priority class
D3DKMT_SCHEDULINGPRIORITYCLASS_NORMAL, ///< Normal priority class
D3DKMT_SCHEDULINGPRIORITYCLASS_ABOVE_NORMAL, ///< Above normal priority class
D3DKMT_SCHEDULINGPRIORITYCLASS_HIGH, ///< High priority class
D3DKMT_SCHEDULINGPRIORITYCLASS_REALTIME ///< Realtime priority class
} D3DKMT_SCHEDULINGPRIORITYCLASS;
typedef UINT D3DKMT_HANDLE;
typedef struct _D3DKMT_OPENADAPTERFROMLUID {
LUID AdapterLuid;
D3DKMT_HANDLE hAdapter;
} D3DKMT_OPENADAPTERFROMLUID;
typedef struct _D3DKMT_WDDM_2_7_CAPS {
union {
struct
{
UINT HwSchSupported : 1;
UINT HwSchEnabled : 1;
UINT HwSchEnabledByDefault : 1;
UINT IndependentVidPnVSyncControl : 1;
UINT Reserved : 28;
};
UINT Value;
};
} D3DKMT_WDDM_2_7_CAPS;
typedef struct _D3DKMT_QUERYADAPTERINFO {
D3DKMT_HANDLE hAdapter;
UINT Type;
VOID *pPrivateDriverData;
UINT PrivateDriverDataSize;
} D3DKMT_QUERYADAPTERINFO;
const UINT KMTQAITYPE_WDDM_2_7_CAPS = 70;
typedef struct _D3DKMT_CLOSEADAPTER {
D3DKMT_HANDLE hAdapter;
} D3DKMT_CLOSEADAPTER;
typedef NTSTATUS(WINAPI *PD3DKMTSetProcessSchedulingPriorityClass)(HANDLE, D3DKMT_SCHEDULINGPRIORITYCLASS);
typedef NTSTATUS(WINAPI *PD3DKMTOpenAdapterFromLuid)(D3DKMT_OPENADAPTERFROMLUID *);
typedef NTSTATUS(WINAPI *PD3DKMTQueryAdapterInfo)(D3DKMT_QUERYADAPTERINFO *);
typedef NTSTATUS(WINAPI *PD3DKMTCloseAdapter)(D3DKMT_CLOSEADAPTER *);
virtual bool
is_hdr() override;
virtual bool
get_hdr_metadata(SS_HDR_METADATA &metadata) override;
const char *
dxgi_format_to_string(DXGI_FORMAT format);
const char *
colorspace_to_string(DXGI_COLOR_SPACE_TYPE type);
virtual std::vector<DXGI_FORMAT>
get_supported_capture_formats() = 0;
protected:
int
get_pixel_pitch() {
return (capture_format == DXGI_FORMAT_R16G16B16A16_FLOAT) ? 8 : 4;
}
virtual capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) = 0;
virtual capture_e
release_snapshot() = 0;
virtual int
complete_img(img_t *img, bool dummy) = 0;
};
/**
* Display component for devices that use software encoders.
*/
class display_ram_t: public display_base_t {
public:
std::shared_ptr<img_t>
alloc_img() override;
int
dummy_img(img_t *img) override;
int
complete_img(img_t *img, bool dummy) override;
std::vector<DXGI_FORMAT>
get_supported_capture_formats() override;
std::unique_ptr<avcodec_encode_device_t>
make_avcodec_encode_device(pix_fmt_e pix_fmt) override;
D3D11_MAPPED_SUBRESOURCE img_info;
texture2d_t texture;
};
/**
* Display component for devices that use hardware encoders.
*/
class display_vram_t: public display_base_t, public std::enable_shared_from_this<display_vram_t> {
public:
std::shared_ptr<img_t>
alloc_img() override;
int
dummy_img(img_t *img_base) override;
int
complete_img(img_t *img_base, bool dummy) override;
std::vector<DXGI_FORMAT>
get_supported_capture_formats() override;
bool
is_codec_supported(std::string_view name, const ::video::config_t &config) override;
std::unique_ptr<avcodec_encode_device_t>
make_avcodec_encode_device(pix_fmt_e pix_fmt) override;
std::unique_ptr<nvenc_encode_device_t>
make_nvenc_encode_device(pix_fmt_e pix_fmt) override;
std::atomic<uint32_t> next_image_id;
};
/**
* Display duplicator that uses the DirectX Desktop Duplication API.
*/
class duplication_t {
public:
dup_t dup;
bool has_frame {};
std::chrono::steady_clock::time_point last_protected_content_warning_time {};
int
init(display_base_t *display, const ::video::config_t &config);
capture_e
next_frame(DXGI_OUTDUPL_FRAME_INFO &frame_info, std::chrono::milliseconds timeout, resource_t::pointer *res_p);
capture_e
reset(dup_t::pointer dup_p = dup_t::pointer());
capture_e
release_frame();
~duplication_t();
};
/**
* Display backend that uses DDAPI with a software encoder.
*/
class display_ddup_ram_t: public display_ram_t {
public:
int
init(const ::video::config_t &config, const std::string &display_name);
capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) override;
capture_e
release_snapshot() override;
duplication_t dup;
cursor_t cursor;
};
/**
* Display backend that uses DDAPI with a hardware encoder.
*/
class display_ddup_vram_t: public display_vram_t {
public:
int
init(const ::video::config_t &config, const std::string &display_name);
capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) override;
capture_e
release_snapshot() override;
duplication_t dup;
sampler_state_t sampler_linear;
blend_t blend_alpha;
blend_t blend_invert;
blend_t blend_disable;
ps_t cursor_ps;
vs_t cursor_vs;
gpu_cursor_t cursor_alpha;
gpu_cursor_t cursor_xor;
texture2d_t old_surface_delayed_destruction;
std::chrono::steady_clock::time_point old_surface_timestamp;
std::variant<std::monostate, texture2d_t, std::shared_ptr<platf::img_t>> last_frame_variant;
};
/**
* Display duplicator that uses the Windows.Graphics.Capture API.
*/
class wgc_capture_t {
winrt::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice uwp_device { nullptr };
winrt::Windows::Graphics::Capture::GraphicsCaptureItem item { nullptr };
winrt::Windows::Graphics::Capture::Direct3D11CaptureFramePool frame_pool { nullptr };
winrt::Windows::Graphics::Capture::GraphicsCaptureSession capture_session { nullptr };
winrt::Windows::Graphics::Capture::Direct3D11CaptureFrame produced_frame { nullptr }, consumed_frame { nullptr };
SRWLOCK frame_lock = SRWLOCK_INIT;
CONDITION_VARIABLE frame_present_cv;
void
on_frame_arrived(winrt::Windows::Graphics::Capture::Direct3D11CaptureFramePool const &sender, winrt::Windows::Foundation::IInspectable const &);
public:
wgc_capture_t();
~wgc_capture_t();
int
init(display_base_t *display, const ::video::config_t &config);
capture_e
next_frame(std::chrono::milliseconds timeout, ID3D11Texture2D **out, uint64_t &out_time);
capture_e
release_frame();
int
set_cursor_visible(bool);
};
/**
* Display backend that uses Windows.Graphics.Capture with a software encoder.
*/
class display_wgc_ram_t: public display_ram_t {
wgc_capture_t dup;
public:
int
init(const ::video::config_t &config, const std::string &display_name);
capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) override;
capture_e
release_snapshot() override;
};
/**
* Display backend that uses Windows.Graphics.Capture with a hardware encoder.
*/
class display_wgc_vram_t: public display_vram_t {
wgc_capture_t dup;
public:
int
init(const ::video::config_t &config, const std::string &display_name);
capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) override;
capture_e
release_snapshot() override;
};
} // namespace platf::dxgi
| 15,081
|
C++
|
.h
| 363
| 36.358127
| 166
| 0.714793
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,091
|
PolicyConfig.h
|
LizardByte_Sunshine/src/platform/windows/PolicyConfig.h
|
/**
* @file src/platform/windows/PolicyConfig.h
* @brief Undocumented COM-interface IPolicyConfig.
* @details Use for setting default audio render endpoint.
* @author EreTIk
* @see https://kitere.github.io/
*/
#pragma once
#include <mmdeviceapi.h>
#ifdef __MINGW32__
#undef DEFINE_GUID
#ifdef __cplusplus
#define DEFINE_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) EXTERN_C const GUID DECLSPEC_SELECTANY name = { l, w1, w2, { b1, b2, b3, b4, b5, b6, b7, b8 } }
#else
#define DEFINE_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) const GUID DECLSPEC_SELECTANY name = { l, w1, w2, { b1, b2, b3, b4, b5, b6, b7, b8 } }
#endif
DEFINE_GUID(IID_IPolicyConfig, 0xf8679f50, 0x850a, 0x41cf, 0x9c, 0x72, 0x43, 0x0f, 0x29, 0x02, 0x90, 0xc8);
DEFINE_GUID(CLSID_CPolicyConfigClient, 0x870af99c, 0x171d, 0x4f9e, 0xaf, 0x0d, 0xe6, 0x3d, 0xf4, 0x0c, 0x2b, 0xc9);
#endif
interface DECLSPEC_UUID("f8679f50-850a-41cf-9c72-430f290290c8") IPolicyConfig;
class DECLSPEC_UUID("870af99c-171d-4f9e-af0d-e63df40c2bc9") CPolicyConfigClient;
// ----------------------------------------------------------------------------
// class CPolicyConfigClient
// {870af99c-171d-4f9e-af0d-e63df40c2bc9}
//
// interface IPolicyConfig
// {f8679f50-850a-41cf-9c72-430f290290c8}
//
// Query interface:
// CComPtr<IPolicyConfig> PolicyConfig;
// PolicyConfig.CoCreateInstance(__uuidof(CPolicyConfigClient));
//
// @compatible: Windows 7 and Later
// ----------------------------------------------------------------------------
interface IPolicyConfig: public IUnknown {
public:
virtual HRESULT
GetMixFormat(
PCWSTR,
WAVEFORMATEX **);
virtual HRESULT STDMETHODCALLTYPE
GetDeviceFormat(
PCWSTR,
INT,
WAVEFORMATEX **);
virtual HRESULT STDMETHODCALLTYPE ResetDeviceFormat(
PCWSTR);
virtual HRESULT STDMETHODCALLTYPE
SetDeviceFormat(
PCWSTR,
WAVEFORMATEX *,
WAVEFORMATEX *);
virtual HRESULT STDMETHODCALLTYPE GetProcessingPeriod(
PCWSTR,
INT,
PINT64,
PINT64);
virtual HRESULT STDMETHODCALLTYPE SetProcessingPeriod(
PCWSTR,
PINT64);
virtual HRESULT STDMETHODCALLTYPE
GetShareMode(
PCWSTR,
struct DeviceShareMode *);
virtual HRESULT STDMETHODCALLTYPE
SetShareMode(
PCWSTR,
struct DeviceShareMode *);
virtual HRESULT STDMETHODCALLTYPE
GetPropertyValue(
PCWSTR,
const PROPERTYKEY &,
PROPVARIANT *);
virtual HRESULT STDMETHODCALLTYPE
SetPropertyValue(
PCWSTR,
const PROPERTYKEY &,
PROPVARIANT *);
virtual HRESULT STDMETHODCALLTYPE
SetDefaultEndpoint(
PCWSTR wszDeviceId,
ERole eRole);
virtual HRESULT STDMETHODCALLTYPE SetEndpointVisibility(
PCWSTR,
INT);
};
| 2,717
|
C++
|
.h
| 86
| 28.325581
| 168
| 0.697016
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,092
|
undo_file.h
|
LizardByte_Sunshine/src/platform/windows/nvprefs/undo_file.h
|
/**
* @file src/platform/windows/nvprefs/undo_file.h
* @brief Declarations for the nvidia undo file.
*/
#pragma once
// standard library headers
#include <filesystem>
// local includes
#include "nvprefs_common.h"
#include "undo_data.h"
namespace nvprefs {
class undo_file_t {
public:
static std::optional<undo_file_t>
open_existing_file(std::filesystem::path file_path, bool &access_denied);
static std::optional<undo_file_t>
create_new_file(std::filesystem::path file_path);
bool
delete_file();
bool
write_undo_data(const undo_data_t &undo_data);
std::optional<undo_data_t>
read_undo_data();
private:
undo_file_t() = default;
safe_handle file_handle;
};
} // namespace nvprefs
| 748
|
C++
|
.h
| 28
| 23.25
| 77
| 0.705634
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,093
|
nvprefs_interface.h
|
LizardByte_Sunshine/src/platform/windows/nvprefs/nvprefs_interface.h
|
/**
* @file src/platform/windows/nvprefs/nvprefs_interface.h
* @brief Declarations for nvidia preferences interface.
*/
#pragma once
// standard library headers
#include <memory>
namespace nvprefs {
class nvprefs_interface {
public:
nvprefs_interface();
~nvprefs_interface();
bool
load();
void
unload();
bool
restore_from_and_delete_undo_file_if_exists();
bool
modify_application_profile();
bool
modify_global_profile();
bool
owning_undo_file();
bool
restore_global_profile();
private:
struct impl;
std::unique_ptr<impl> pimpl;
};
} // namespace nvprefs
| 650
|
C++
|
.h
| 31
| 16.903226
| 57
| 0.686985
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,094
|
driver_settings.h
|
LizardByte_Sunshine/src/platform/windows/nvprefs/driver_settings.h
|
/**
* @file src/platform/windows/nvprefs/driver_settings.h
* @brief Declarations for nvidia driver settings.
*/
#pragma once
// nvapi headers
// disable clang-format header reordering
// as <NvApiDriverSettings.h> needs types from <nvapi.h>
// clang-format off
#include <nvapi.h>
#include <NvApiDriverSettings.h>
// clang-format on
// local includes
#include "undo_data.h"
namespace nvprefs {
class driver_settings_t {
public:
~driver_settings_t();
bool
init();
void
destroy();
bool
load_settings();
bool
save_settings();
bool
restore_global_profile_to_undo(const undo_data_t &undo_data);
bool
check_and_modify_global_profile(std::optional<undo_data_t> &undo_data);
bool
check_and_modify_application_profile(bool &modified);
private:
NvDRSSessionHandle session_handle = 0;
};
} // namespace nvprefs
| 888
|
C++
|
.h
| 36
| 21.222222
| 75
| 0.715137
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,095
|
nvprefs_common.h
|
LizardByte_Sunshine/src/platform/windows/nvprefs/nvprefs_common.h
|
/**
* @file src/platform/windows/nvprefs/nvprefs_common.h
* @brief Declarations for common nvidia preferences.
*/
#pragma once
// sunshine utility header for generic smart pointers
#include "src/utility.h"
// winapi headers
// disable clang-format header reordering
// clang-format off
#include <windows.h>
#include <aclapi.h>
// clang-format on
namespace nvprefs {
struct safe_handle: public util::safe_ptr_v2<void, BOOL, CloseHandle> {
using util::safe_ptr_v2<void, BOOL, CloseHandle>::safe_ptr_v2;
explicit
operator bool() const {
auto handle = get();
return handle != NULL && handle != INVALID_HANDLE_VALUE;
}
};
struct safe_hlocal_deleter {
void
operator()(void *p) {
LocalFree(p);
}
};
template <typename T>
using safe_hlocal = util::uniq_ptr<std::remove_pointer_t<T>, safe_hlocal_deleter>;
using safe_sid = util::safe_ptr_v2<void, PVOID, FreeSid>;
void
info_message(const std::wstring &message);
void
info_message(const std::string &message);
void
error_message(const std::wstring &message);
void
error_message(const std::string &message);
struct nvprefs_options {
bool opengl_vulkan_on_dxgi = true;
bool sunshine_high_power_mode = true;
};
nvprefs_options
get_nvprefs_options();
} // namespace nvprefs
| 1,320
|
C++
|
.h
| 46
| 25.326087
| 84
| 0.709524
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,096
|
undo_data.h
|
LizardByte_Sunshine/src/platform/windows/nvprefs/undo_data.h
|
/**
* @file src/platform/windows/nvprefs/undo_data.h
* @brief Declarations for undoing changes to nvidia preferences.
*/
#pragma once
// standard library headers
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
namespace nvprefs {
class undo_data_t {
public:
struct data_t {
struct opengl_swapchain_t {
uint32_t our_value;
std::optional<uint32_t> undo_value;
};
std::optional<opengl_swapchain_t> opengl_swapchain;
};
void
set_opengl_swapchain(uint32_t our_value, std::optional<uint32_t> undo_value);
std::optional<data_t::opengl_swapchain_t>
get_opengl_swapchain() const;
std::string
write() const;
void
read(const std::vector<char> &buffer);
void
merge(const undo_data_t &newer_data);
private:
data_t data;
};
} // namespace nvprefs
| 869
|
C++
|
.h
| 34
| 21.382353
| 81
| 0.68568
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
4,098
|
egl.h
|
LizardByte_Sunshine/third-party/glad/include/glad/egl.h
|
/**
* Loader generated by glad 2.0.0-beta on Tue Jun 1 10:22:05 2021
*
* Generator: C/C++
* Specification: egl
* Extensions: 0
*
* APIs:
* - egl=1.5
*
* Options:
* - ALIAS = False
* - DEBUG = False
* - HEADER_ONLY = False
* - LOADER = True
* - MX = True
* - MX_GLOBAL = False
* - ON_DEMAND = False
*
* Commandline:
* --api='egl=1.5' --extensions='' c --loader --mx
*
* Online:
* http://glad.sh/#api=egl%3D1.5&extensions=&generator=c&options=LOADER%2CMX
*
*/
#ifndef GLAD_EGL_H_
#define GLAD_EGL_H_
#define GLAD_EGL
#define GLAD_OPTION_EGL_LOADER
#ifdef __cplusplus
extern "C" {
#endif
#ifndef GLAD_PLATFORM_H_
#define GLAD_PLATFORM_H_
#ifndef GLAD_PLATFORM_WIN32
#if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) || defined(__MINGW32__)
#define GLAD_PLATFORM_WIN32 1
#else
#define GLAD_PLATFORM_WIN32 0
#endif
#endif
#ifndef GLAD_PLATFORM_APPLE
#ifdef __APPLE__
#define GLAD_PLATFORM_APPLE 1
#else
#define GLAD_PLATFORM_APPLE 0
#endif
#endif
#ifndef GLAD_PLATFORM_EMSCRIPTEN
#ifdef __EMSCRIPTEN__
#define GLAD_PLATFORM_EMSCRIPTEN 1
#else
#define GLAD_PLATFORM_EMSCRIPTEN 0
#endif
#endif
#ifndef GLAD_PLATFORM_UWP
#if defined(_MSC_VER) && !defined(GLAD_INTERNAL_HAVE_WINAPIFAMILY)
#ifdef __has_include
#if __has_include(<winapifamily.h>)
#define GLAD_INTERNAL_HAVE_WINAPIFAMILY 1
#endif
#elif _MSC_VER >= 1700 && !_USING_V110_SDK71_
#define GLAD_INTERNAL_HAVE_WINAPIFAMILY 1
#endif
#endif
#ifdef GLAD_INTERNAL_HAVE_WINAPIFAMILY
#include <winapifamily.h>
#if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
#define GLAD_PLATFORM_UWP 1
#endif
#endif
#ifndef GLAD_PLATFORM_UWP
#define GLAD_PLATFORM_UWP 0
#endif
#endif
#ifdef __GNUC__
#define GLAD_GNUC_EXTENSION __extension__
#else
#define GLAD_GNUC_EXTENSION
#endif
#ifndef GLAD_API_CALL
#if defined(GLAD_API_CALL_EXPORT)
#if GLAD_PLATFORM_WIN32 || defined(__CYGWIN__)
#if defined(GLAD_API_CALL_EXPORT_BUILD)
#if defined(__GNUC__)
#define GLAD_API_CALL __attribute__((dllexport)) extern
#else
#define GLAD_API_CALL __declspec(dllexport) extern
#endif
#else
#if defined(__GNUC__)
#define GLAD_API_CALL __attribute__((dllimport)) extern
#else
#define GLAD_API_CALL __declspec(dllimport) extern
#endif
#endif
#elif defined(__GNUC__) && defined(GLAD_API_CALL_EXPORT_BUILD)
#define GLAD_API_CALL __attribute__((visibility("default"))) extern
#else
#define GLAD_API_CALL extern
#endif
#else
#define GLAD_API_CALL extern
#endif
#endif
#ifdef APIENTRY
#define GLAD_API_PTR APIENTRY
#elif GLAD_PLATFORM_WIN32
#define GLAD_API_PTR __stdcall
#else
#define GLAD_API_PTR
#endif
#ifndef GLAPI
#define GLAPI GLAD_API_CALL
#endif
#ifndef GLAPIENTRY
#define GLAPIENTRY GLAD_API_PTR
#endif
#define GLAD_MAKE_VERSION(major, minor) (major * 10000 + minor)
#define GLAD_VERSION_MAJOR(version) (version / 10000)
#define GLAD_VERSION_MINOR(version) (version % 10000)
#define GLAD_GENERATOR_VERSION "2.0.0-beta"
typedef void (*GLADapiproc)(void);
typedef GLADapiproc (*GLADloadfunc)(const char *name);
typedef GLADapiproc (*GLADuserptrloadfunc)(void *userptr, const char *name);
typedef void (*GLADprecallback)(const char *name, GLADapiproc apiproc, int len_args, ...);
typedef void (*GLADpostcallback)(void *ret, const char *name, GLADapiproc apiproc, int len_args, ...);
#endif /* GLAD_PLATFORM_H_ */
#define EGL_ALPHA_FORMAT 0x3088
#define EGL_ALPHA_FORMAT_NONPRE 0x308B
#define EGL_ALPHA_FORMAT_PRE 0x308C
#define EGL_ALPHA_MASK_SIZE 0x303E
#define EGL_ALPHA_SIZE 0x3021
#define EGL_BACK_BUFFER 0x3084
#define EGL_BAD_ACCESS 0x3002
#define EGL_BAD_ALLOC 0x3003
#define EGL_BAD_ATTRIBUTE 0x3004
#define EGL_BAD_CONFIG 0x3005
#define EGL_BAD_CONTEXT 0x3006
#define EGL_BAD_CURRENT_SURFACE 0x3007
#define EGL_BAD_DISPLAY 0x3008
#define EGL_BAD_MATCH 0x3009
#define EGL_BAD_NATIVE_PIXMAP 0x300A
#define EGL_BAD_NATIVE_WINDOW 0x300B
#define EGL_BAD_PARAMETER 0x300C
#define EGL_BAD_SURFACE 0x300D
#define EGL_BIND_TO_TEXTURE_RGB 0x3039
#define EGL_BIND_TO_TEXTURE_RGBA 0x303A
#define EGL_BLUE_SIZE 0x3022
#define EGL_BUFFER_DESTROYED 0x3095
#define EGL_BUFFER_PRESERVED 0x3094
#define EGL_BUFFER_SIZE 0x3020
#define EGL_CLIENT_APIS 0x308D
#define EGL_CL_EVENT_HANDLE 0x309C
#define EGL_COLORSPACE 0x3087
#define EGL_COLORSPACE_LINEAR 0x308A
#define EGL_COLORSPACE_sRGB 0x3089
#define EGL_COLOR_BUFFER_TYPE 0x303F
#define EGL_CONDITION_SATISFIED 0x30F6
#define EGL_CONFIG_CAVEAT 0x3027
#define EGL_CONFIG_ID 0x3028
#define EGL_CONFORMANT 0x3042
#define EGL_CONTEXT_CLIENT_TYPE 0x3097
#define EGL_CONTEXT_CLIENT_VERSION 0x3098
#define EGL_CONTEXT_LOST 0x300E
#define EGL_CONTEXT_MAJOR_VERSION 0x3098
#define EGL_CONTEXT_MINOR_VERSION 0x30FB
#define EGL_CONTEXT_OPENGL_COMPATIBILITY_PROFILE_BIT 0x00000002
#define EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT 0x00000001
#define EGL_CONTEXT_OPENGL_DEBUG 0x31B0
#define EGL_CONTEXT_OPENGL_FORWARD_COMPATIBLE 0x31B1
#define EGL_CONTEXT_OPENGL_PROFILE_MASK 0x30FD
#define EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY 0x31BD
#define EGL_CONTEXT_OPENGL_ROBUST_ACCESS 0x31B2
#define EGL_CORE_NATIVE_ENGINE 0x305B
#define EGL_DEFAULT_DISPLAY EGL_CAST(EGLNativeDisplayType, 0)
#define EGL_DEPTH_SIZE 0x3025
#define EGL_DISPLAY_SCALING 10000
#define EGL_DONT_CARE EGL_CAST(EGLint, -1)
#define EGL_DRAW 0x3059
#define EGL_EXTENSIONS 0x3055
#define EGL_FALSE 0
#define EGL_FOREVER 0xFFFFFFFFFFFFFFFF
#define EGL_GL_COLORSPACE 0x309D
#define EGL_GL_COLORSPACE_LINEAR 0x308A
#define EGL_GL_COLORSPACE_SRGB 0x3089
#define EGL_GL_RENDERBUFFER 0x30B9
#define EGL_GL_TEXTURE_2D 0x30B1
#define EGL_GL_TEXTURE_3D 0x30B2
#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x30B4
#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x30B6
#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x30B8
#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x30B3
#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x30B5
#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x30B7
#define EGL_GL_TEXTURE_LEVEL 0x30BC
#define EGL_GL_TEXTURE_ZOFFSET 0x30BD
#define EGL_GREEN_SIZE 0x3023
#define EGL_HEIGHT 0x3056
#define EGL_HORIZONTAL_RESOLUTION 0x3090
#define EGL_IMAGE_PRESERVED 0x30D2
#define EGL_LARGEST_PBUFFER 0x3058
#define EGL_LEVEL 0x3029
#define EGL_LOSE_CONTEXT_ON_RESET 0x31BF
#define EGL_LUMINANCE_BUFFER 0x308F
#define EGL_LUMINANCE_SIZE 0x303D
#define EGL_MATCH_NATIVE_PIXMAP 0x3041
#define EGL_MAX_PBUFFER_HEIGHT 0x302A
#define EGL_MAX_PBUFFER_PIXELS 0x302B
#define EGL_MAX_PBUFFER_WIDTH 0x302C
#define EGL_MAX_SWAP_INTERVAL 0x303C
#define EGL_MIN_SWAP_INTERVAL 0x303B
#define EGL_MIPMAP_LEVEL 0x3083
#define EGL_MIPMAP_TEXTURE 0x3082
#define EGL_MULTISAMPLE_RESOLVE 0x3099
#define EGL_MULTISAMPLE_RESOLVE_BOX 0x309B
#define EGL_MULTISAMPLE_RESOLVE_BOX_BIT 0x0200
#define EGL_MULTISAMPLE_RESOLVE_DEFAULT 0x309A
#define EGL_NATIVE_RENDERABLE 0x302D
#define EGL_NATIVE_VISUAL_ID 0x302E
#define EGL_NATIVE_VISUAL_TYPE 0x302F
#define EGL_NONE 0x3038
#define EGL_NON_CONFORMANT_CONFIG 0x3051
#define EGL_NOT_INITIALIZED 0x3001
#define EGL_NO_CONTEXT EGL_CAST(EGLContext, 0)
#define EGL_NO_DISPLAY EGL_CAST(EGLDisplay, 0)
#define EGL_NO_IMAGE EGL_CAST(EGLImage, 0)
#define EGL_NO_RESET_NOTIFICATION 0x31BE
#define EGL_NO_SURFACE EGL_CAST(EGLSurface, 0)
#define EGL_NO_SYNC EGL_CAST(EGLSync, 0)
#define EGL_NO_TEXTURE 0x305C
#define EGL_OPENGL_API 0x30A2
#define EGL_OPENGL_BIT 0x0008
#define EGL_OPENGL_ES2_BIT 0x0004
#define EGL_OPENGL_ES3_BIT 0x00000040
#define EGL_OPENGL_ES_API 0x30A0
#define EGL_OPENGL_ES_BIT 0x0001
#define EGL_OPENVG_API 0x30A1
#define EGL_OPENVG_BIT 0x0002
#define EGL_OPENVG_IMAGE 0x3096
#define EGL_PBUFFER_BIT 0x0001
#define EGL_PIXEL_ASPECT_RATIO 0x3092
#define EGL_PIXMAP_BIT 0x0002
#define EGL_READ 0x305A
#define EGL_RED_SIZE 0x3024
#define EGL_RENDERABLE_TYPE 0x3040
#define EGL_RENDER_BUFFER 0x3086
#define EGL_RGB_BUFFER 0x308E
#define EGL_SAMPLES 0x3031
#define EGL_SAMPLE_BUFFERS 0x3032
#define EGL_SIGNALED 0x30F2
#define EGL_SINGLE_BUFFER 0x3085
#define EGL_SLOW_CONFIG 0x3050
#define EGL_STENCIL_SIZE 0x3026
#define EGL_SUCCESS 0x3000
#define EGL_SURFACE_TYPE 0x3033
#define EGL_SWAP_BEHAVIOR 0x3093
#define EGL_SWAP_BEHAVIOR_PRESERVED_BIT 0x0400
#define EGL_SYNC_CL_EVENT 0x30FE
#define EGL_SYNC_CL_EVENT_COMPLETE 0x30FF
#define EGL_SYNC_CONDITION 0x30F8
#define EGL_SYNC_FENCE 0x30F9
#define EGL_SYNC_FLUSH_COMMANDS_BIT 0x0001
#define EGL_SYNC_PRIOR_COMMANDS_COMPLETE 0x30F0
#define EGL_SYNC_STATUS 0x30F1
#define EGL_SYNC_TYPE 0x30F7
#define EGL_TEXTURE_2D 0x305F
#define EGL_TEXTURE_FORMAT 0x3080
#define EGL_TEXTURE_RGB 0x305D
#define EGL_TEXTURE_RGBA 0x305E
#define EGL_TEXTURE_TARGET 0x3081
#define EGL_TIMEOUT_EXPIRED 0x30F5
#define EGL_TRANSPARENT_BLUE_VALUE 0x3035
#define EGL_TRANSPARENT_GREEN_VALUE 0x3036
#define EGL_TRANSPARENT_RED_VALUE 0x3037
#define EGL_TRANSPARENT_RGB 0x3052
#define EGL_TRANSPARENT_TYPE 0x3034
#define EGL_TRUE 1
#define EGL_UNKNOWN EGL_CAST(EGLint, -1)
#define EGL_UNSIGNALED 0x30F3
#define EGL_VENDOR 0x3053
#define EGL_VERSION 0x3054
#define EGL_VERTICAL_RESOLUTION 0x3091
#define EGL_VG_ALPHA_FORMAT 0x3088
#define EGL_VG_ALPHA_FORMAT_NONPRE 0x308B
#define EGL_VG_ALPHA_FORMAT_PRE 0x308C
#define EGL_VG_ALPHA_FORMAT_PRE_BIT 0x0040
#define EGL_VG_COLORSPACE 0x3087
#define EGL_VG_COLORSPACE_LINEAR 0x308A
#define EGL_VG_COLORSPACE_LINEAR_BIT 0x0020
#define EGL_VG_COLORSPACE_sRGB 0x3089
#define EGL_WIDTH 0x3057
#define EGL_WINDOW_BIT 0x0004
#include <KHR/khrplatform.h>
#include <EGL/eglplatform.h>
struct AHardwareBuffer;
struct wl_buffer;
struct wl_display;
struct wl_resource;
typedef unsigned int EGLBoolean;
typedef unsigned int EGLenum;
typedef intptr_t EGLAttribKHR;
typedef intptr_t EGLAttrib;
typedef void *EGLClientBuffer;
typedef void *EGLConfig;
typedef void *EGLContext;
typedef void *EGLDeviceEXT;
typedef void *EGLDisplay;
typedef void *EGLImage;
typedef void *EGLImageKHR;
typedef void *EGLLabelKHR;
typedef void *EGLObjectKHR;
typedef void *EGLOutputLayerEXT;
typedef void *EGLOutputPortEXT;
typedef void *EGLStreamKHR;
typedef void *EGLSurface;
typedef void *EGLSync;
typedef void *EGLSyncKHR;
typedef void *EGLSyncNV;
typedef void (*__eglMustCastToProperFunctionPointerType)(void);
typedef khronos_utime_nanoseconds_t EGLTimeKHR;
typedef khronos_utime_nanoseconds_t EGLTime;
typedef khronos_utime_nanoseconds_t EGLTimeNV;
typedef khronos_utime_nanoseconds_t EGLuint64NV;
typedef khronos_uint64_t EGLuint64KHR;
typedef khronos_stime_nanoseconds_t EGLnsecsANDROID;
typedef int EGLNativeFileDescriptorKHR;
typedef khronos_ssize_t EGLsizeiANDROID;
typedef void (*EGLSetBlobFuncANDROID)(const void *key, EGLsizeiANDROID keySize, const void *value, EGLsizeiANDROID valueSize);
typedef EGLsizeiANDROID (*EGLGetBlobFuncANDROID)(const void *key, EGLsizeiANDROID keySize, void *value, EGLsizeiANDROID valueSize);
struct EGLClientPixmapHI {
void *pData;
EGLint iWidth;
EGLint iHeight;
EGLint iStride;
};
typedef void(GLAD_API_PTR *EGLDEBUGPROCKHR)(EGLenum error, const char *command, EGLint messageType, EGLLabelKHR threadLabel, EGLLabelKHR objectLabel, const char *message);
#define PFNEGLBINDWAYLANDDISPLAYWL PFNEGLBINDWAYLANDDISPLAYWLPROC
#define PFNEGLUNBINDWAYLANDDISPLAYWL PFNEGLUNBINDWAYLANDDISPLAYWLPROC
#define PFNEGLQUERYWAYLANDBUFFERWL PFNEGLQUERYWAYLANDBUFFERWLPROC
#define PFNEGLCREATEWAYLANDBUFFERFROMIMAGEWL PFNEGLCREATEWAYLANDBUFFERFROMIMAGEWLPROC
#define EGL_VERSION_1_0 1
GLAD_API_CALL int GLAD_EGL_VERSION_1_0;
#define EGL_VERSION_1_1 1
GLAD_API_CALL int GLAD_EGL_VERSION_1_1;
#define EGL_VERSION_1_2 1
GLAD_API_CALL int GLAD_EGL_VERSION_1_2;
#define EGL_VERSION_1_3 1
GLAD_API_CALL int GLAD_EGL_VERSION_1_3;
#define EGL_VERSION_1_4 1
GLAD_API_CALL int GLAD_EGL_VERSION_1_4;
#define EGL_VERSION_1_5 1
GLAD_API_CALL int GLAD_EGL_VERSION_1_5;
typedef EGLBoolean(GLAD_API_PTR *PFNEGLBINDAPIPROC)(EGLenum api);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLBINDTEXIMAGEPROC)(EGLDisplay dpy, EGLSurface surface, EGLint buffer);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLCHOOSECONFIGPROC)(EGLDisplay dpy, const EGLint *attrib_list, EGLConfig *configs, EGLint config_size, EGLint *num_config);
typedef EGLint(GLAD_API_PTR *PFNEGLCLIENTWAITSYNCPROC)(EGLDisplay dpy, EGLSync sync, EGLint flags, EGLTime timeout);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLCOPYBUFFERSPROC)(EGLDisplay dpy, EGLSurface surface, EGLNativePixmapType target);
typedef EGLContext(GLAD_API_PTR *PFNEGLCREATECONTEXTPROC)(EGLDisplay dpy, EGLConfig config, EGLContext share_context, const EGLint *attrib_list);
typedef EGLImage(GLAD_API_PTR *PFNEGLCREATEIMAGEPROC)(EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLAttrib *attrib_list);
typedef EGLSurface(GLAD_API_PTR *PFNEGLCREATEPBUFFERFROMCLIENTBUFFERPROC)(EGLDisplay dpy, EGLenum buftype, EGLClientBuffer buffer, EGLConfig config, const EGLint *attrib_list);
typedef EGLSurface(GLAD_API_PTR *PFNEGLCREATEPBUFFERSURFACEPROC)(EGLDisplay dpy, EGLConfig config, const EGLint *attrib_list);
typedef EGLSurface(GLAD_API_PTR *PFNEGLCREATEPIXMAPSURFACEPROC)(EGLDisplay dpy, EGLConfig config, EGLNativePixmapType pixmap, const EGLint *attrib_list);
typedef EGLSurface(GLAD_API_PTR *PFNEGLCREATEPLATFORMPIXMAPSURFACEPROC)(EGLDisplay dpy, EGLConfig config, void *native_pixmap, const EGLAttrib *attrib_list);
typedef EGLSurface(GLAD_API_PTR *PFNEGLCREATEPLATFORMWINDOWSURFACEPROC)(EGLDisplay dpy, EGLConfig config, void *native_window, const EGLAttrib *attrib_list);
typedef EGLSync(GLAD_API_PTR *PFNEGLCREATESYNCPROC)(EGLDisplay dpy, EGLenum type, const EGLAttrib *attrib_list);
typedef EGLSurface(GLAD_API_PTR *PFNEGLCREATEWINDOWSURFACEPROC)(EGLDisplay dpy, EGLConfig config, EGLNativeWindowType win, const EGLint *attrib_list);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLDESTROYCONTEXTPROC)(EGLDisplay dpy, EGLContext ctx);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLDESTROYIMAGEPROC)(EGLDisplay dpy, EGLImage image);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLDESTROYSURFACEPROC)(EGLDisplay dpy, EGLSurface surface);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLDESTROYSYNCPROC)(EGLDisplay dpy, EGLSync sync);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLGETCONFIGATTRIBPROC)(EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint *value);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLGETCONFIGSPROC)(EGLDisplay dpy, EGLConfig *configs, EGLint config_size, EGLint *num_config);
typedef EGLContext(GLAD_API_PTR *PFNEGLGETCURRENTCONTEXTPROC)(void);
typedef EGLDisplay(GLAD_API_PTR *PFNEGLGETCURRENTDISPLAYPROC)(void);
typedef EGLSurface(GLAD_API_PTR *PFNEGLGETCURRENTSURFACEPROC)(EGLint readdraw);
typedef EGLDisplay(GLAD_API_PTR *PFNEGLGETDISPLAYPROC)(EGLNativeDisplayType display_id);
typedef EGLint(GLAD_API_PTR *PFNEGLGETERRORPROC)(void);
typedef EGLDisplay(GLAD_API_PTR *PFNEGLGETPLATFORMDISPLAYPROC)(EGLenum platform, void *native_display, const EGLAttrib *attrib_list);
typedef __eglMustCastToProperFunctionPointerType(GLAD_API_PTR *PFNEGLGETPROCADDRESSPROC)(const char *procname);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLGETSYNCATTRIBPROC)(EGLDisplay dpy, EGLSync sync, EGLint attribute, EGLAttrib *value);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLINITIALIZEPROC)(EGLDisplay dpy, EGLint *major, EGLint *minor);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLMAKECURRENTPROC)(EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext ctx);
typedef EGLenum(GLAD_API_PTR *PFNEGLQUERYAPIPROC)(void);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLQUERYCONTEXTPROC)(EGLDisplay dpy, EGLContext ctx, EGLint attribute, EGLint *value);
typedef const char *(GLAD_API_PTR *PFNEGLQUERYSTRINGPROC)(EGLDisplay dpy, EGLint name);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLQUERYSURFACEPROC)(EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint *value);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLRELEASETEXIMAGEPROC)(EGLDisplay dpy, EGLSurface surface, EGLint buffer);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLRELEASETHREADPROC)(void);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLSURFACEATTRIBPROC)(EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint value);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLSWAPBUFFERSPROC)(EGLDisplay dpy, EGLSurface surface);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLSWAPINTERVALPROC)(EGLDisplay dpy, EGLint interval);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLTERMINATEPROC)(EGLDisplay dpy);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLWAITCLIENTPROC)(void);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLWAITGLPROC)(void);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLWAITNATIVEPROC)(EGLint engine);
typedef EGLBoolean(GLAD_API_PTR *PFNEGLWAITSYNCPROC)(EGLDisplay dpy, EGLSync sync, EGLint flags);
typedef EGLImageKHR(EGLAPIENTRYP PFNEGLCREATEIMAGEKHRPROC)(EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLint *attrib_list);
typedef EGLBoolean(EGLAPIENTRYP PFNEGLDESTROYIMAGEKHRPROC)(EGLDisplay dpy, EGLImageKHR image);
GLAD_API_CALL PFNEGLCREATEIMAGEKHRPROC glad_eglCreateImageKHR;
#define eglCreateImageKHR glad_eglCreateImageKHR
GLAD_API_CALL PFNEGLDESTROYIMAGEKHRPROC glad_eglDestroyImageKHR;
#define eglDestroyImageKHR glad_eglDestroyImageKHR
GLAD_API_CALL PFNEGLBINDAPIPROC glad_eglBindAPI;
#define eglBindAPI glad_eglBindAPI
GLAD_API_CALL PFNEGLBINDTEXIMAGEPROC glad_eglBindTexImage;
#define eglBindTexImage glad_eglBindTexImage
GLAD_API_CALL PFNEGLCHOOSECONFIGPROC glad_eglChooseConfig;
#define eglChooseConfig glad_eglChooseConfig
GLAD_API_CALL PFNEGLCLIENTWAITSYNCPROC glad_eglClientWaitSync;
#define eglClientWaitSync glad_eglClientWaitSync
GLAD_API_CALL PFNEGLCOPYBUFFERSPROC glad_eglCopyBuffers;
#define eglCopyBuffers glad_eglCopyBuffers
GLAD_API_CALL PFNEGLCREATECONTEXTPROC glad_eglCreateContext;
#define eglCreateContext glad_eglCreateContext
GLAD_API_CALL PFNEGLCREATEIMAGEPROC glad_eglCreateImage;
#define eglCreateImage glad_eglCreateImage
GLAD_API_CALL PFNEGLCREATEPBUFFERFROMCLIENTBUFFERPROC glad_eglCreatePbufferFromClientBuffer;
#define eglCreatePbufferFromClientBuffer glad_eglCreatePbufferFromClientBuffer
GLAD_API_CALL PFNEGLCREATEPBUFFERSURFACEPROC glad_eglCreatePbufferSurface;
#define eglCreatePbufferSurface glad_eglCreatePbufferSurface
GLAD_API_CALL PFNEGLCREATEPIXMAPSURFACEPROC glad_eglCreatePixmapSurface;
#define eglCreatePixmapSurface glad_eglCreatePixmapSurface
GLAD_API_CALL PFNEGLCREATEPLATFORMPIXMAPSURFACEPROC glad_eglCreatePlatformPixmapSurface;
#define eglCreatePlatformPixmapSurface glad_eglCreatePlatformPixmapSurface
GLAD_API_CALL PFNEGLCREATEPLATFORMWINDOWSURFACEPROC glad_eglCreatePlatformWindowSurface;
#define eglCreatePlatformWindowSurface glad_eglCreatePlatformWindowSurface
GLAD_API_CALL PFNEGLCREATESYNCPROC glad_eglCreateSync;
#define eglCreateSync glad_eglCreateSync
GLAD_API_CALL PFNEGLCREATEWINDOWSURFACEPROC glad_eglCreateWindowSurface;
#define eglCreateWindowSurface glad_eglCreateWindowSurface
GLAD_API_CALL PFNEGLDESTROYCONTEXTPROC glad_eglDestroyContext;
#define eglDestroyContext glad_eglDestroyContext
GLAD_API_CALL PFNEGLDESTROYIMAGEPROC glad_eglDestroyImage;
#define eglDestroyImage glad_eglDestroyImage
GLAD_API_CALL PFNEGLDESTROYSURFACEPROC glad_eglDestroySurface;
#define eglDestroySurface glad_eglDestroySurface
GLAD_API_CALL PFNEGLDESTROYSYNCPROC glad_eglDestroySync;
#define eglDestroySync glad_eglDestroySync
GLAD_API_CALL PFNEGLGETCONFIGATTRIBPROC glad_eglGetConfigAttrib;
#define eglGetConfigAttrib glad_eglGetConfigAttrib
GLAD_API_CALL PFNEGLGETCONFIGSPROC glad_eglGetConfigs;
#define eglGetConfigs glad_eglGetConfigs
GLAD_API_CALL PFNEGLGETCURRENTCONTEXTPROC glad_eglGetCurrentContext;
#define eglGetCurrentContext glad_eglGetCurrentContext
GLAD_API_CALL PFNEGLGETCURRENTDISPLAYPROC glad_eglGetCurrentDisplay;
#define eglGetCurrentDisplay glad_eglGetCurrentDisplay
GLAD_API_CALL PFNEGLGETCURRENTSURFACEPROC glad_eglGetCurrentSurface;
#define eglGetCurrentSurface glad_eglGetCurrentSurface
GLAD_API_CALL PFNEGLGETDISPLAYPROC glad_eglGetDisplay;
#define eglGetDisplay glad_eglGetDisplay
GLAD_API_CALL PFNEGLGETERRORPROC glad_eglGetError;
#define eglGetError glad_eglGetError
GLAD_API_CALL PFNEGLGETPLATFORMDISPLAYPROC glad_eglGetPlatformDisplay;
#define eglGetPlatformDisplay glad_eglGetPlatformDisplay
GLAD_API_CALL PFNEGLGETPROCADDRESSPROC glad_eglGetProcAddress;
#define eglGetProcAddress glad_eglGetProcAddress
GLAD_API_CALL PFNEGLGETSYNCATTRIBPROC glad_eglGetSyncAttrib;
#define eglGetSyncAttrib glad_eglGetSyncAttrib
GLAD_API_CALL PFNEGLINITIALIZEPROC glad_eglInitialize;
#define eglInitialize glad_eglInitialize
GLAD_API_CALL PFNEGLMAKECURRENTPROC glad_eglMakeCurrent;
#define eglMakeCurrent glad_eglMakeCurrent
GLAD_API_CALL PFNEGLQUERYAPIPROC glad_eglQueryAPI;
#define eglQueryAPI glad_eglQueryAPI
GLAD_API_CALL PFNEGLQUERYCONTEXTPROC glad_eglQueryContext;
#define eglQueryContext glad_eglQueryContext
GLAD_API_CALL PFNEGLQUERYSTRINGPROC glad_eglQueryString;
#define eglQueryString glad_eglQueryString
GLAD_API_CALL PFNEGLQUERYSURFACEPROC glad_eglQuerySurface;
#define eglQuerySurface glad_eglQuerySurface
GLAD_API_CALL PFNEGLRELEASETEXIMAGEPROC glad_eglReleaseTexImage;
#define eglReleaseTexImage glad_eglReleaseTexImage
GLAD_API_CALL PFNEGLRELEASETHREADPROC glad_eglReleaseThread;
#define eglReleaseThread glad_eglReleaseThread
GLAD_API_CALL PFNEGLSURFACEATTRIBPROC glad_eglSurfaceAttrib;
#define eglSurfaceAttrib glad_eglSurfaceAttrib
GLAD_API_CALL PFNEGLSWAPBUFFERSPROC glad_eglSwapBuffers;
#define eglSwapBuffers glad_eglSwapBuffers
GLAD_API_CALL PFNEGLSWAPINTERVALPROC glad_eglSwapInterval;
#define eglSwapInterval glad_eglSwapInterval
GLAD_API_CALL PFNEGLTERMINATEPROC glad_eglTerminate;
#define eglTerminate glad_eglTerminate
GLAD_API_CALL PFNEGLWAITCLIENTPROC glad_eglWaitClient;
#define eglWaitClient glad_eglWaitClient
GLAD_API_CALL PFNEGLWAITGLPROC glad_eglWaitGL;
#define eglWaitGL glad_eglWaitGL
GLAD_API_CALL PFNEGLWAITNATIVEPROC glad_eglWaitNative;
#define eglWaitNative glad_eglWaitNative
GLAD_API_CALL PFNEGLWAITSYNCPROC glad_eglWaitSync;
#define eglWaitSync glad_eglWaitSync
GLAD_API_CALL int
gladLoadEGLUserPtr(EGLDisplay display, GLADuserptrloadfunc load, void *userptr);
GLAD_API_CALL int
gladLoadEGL(EGLDisplay display, GLADloadfunc load);
#ifdef GLAD_EGL
GLAD_API_CALL int
gladLoaderLoadEGL(EGLDisplay display);
GLAD_API_CALL void
gladLoaderUnloadEGL(void);
#endif
#ifdef __cplusplus
}
#endif
#endif
| 22,559
|
C++
|
.h
| 506
| 42.482213
| 176
| 0.824287
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| true
| false
| false
| false
| false
| false
| false
|
4,099
|
eglplatform.h
|
LizardByte_Sunshine/third-party/glad/include/EGL/eglplatform.h
|
#ifndef __eglplatform_h_
#define __eglplatform_h_
/*
** Copyright 2007-2020 The Khronos Group Inc.
** SPDX-License-Identifier: Apache-2.0
*/
/* Platform-specific types and definitions for egl.h
*
* Adopters may modify khrplatform.h and this file to suit their platform.
* You are encouraged to submit all modifications to the Khronos group so that
* they can be included in future versions of this file. Please submit changes
* by filing an issue or pull request on the public Khronos EGL Registry, at
* https://www.github.com/KhronosGroup/EGL-Registry/
*/
#include <KHR/khrplatform.h>
/* Macros used in EGL function prototype declarations.
*
* EGL functions should be prototyped as:
*
* EGLAPI return-type EGLAPIENTRY eglFunction(arguments);
* typedef return-type (EXPAPIENTRYP PFNEGLFUNCTIONPROC) (arguments);
*
* KHRONOS_APICALL and KHRONOS_APIENTRY are defined in KHR/khrplatform.h
*/
#ifndef EGLAPI
#define EGLAPI KHRONOS_APICALL
#endif
#ifndef EGLAPIENTRY
#define EGLAPIENTRY KHRONOS_APIENTRY
#endif
#define EGLAPIENTRYP EGLAPIENTRY *
/* The types NativeDisplayType, NativeWindowType, and NativePixmapType
* are aliases of window-system-dependent types, such as X Display * or
* Windows Device Context. They must be defined in platform-specific
* code below. The EGL-prefixed versions of Native*Type are the same
* types, renamed in EGL 1.3 so all types in the API start with "EGL".
*
* Khronos STRONGLY RECOMMENDS that you use the default definitions
* provided below, since these changes affect both binary and source
* portability of applications using EGL running on different EGL
* implementations.
*/
#if defined(EGL_NO_PLATFORM_SPECIFIC_TYPES)
typedef void *EGLNativeDisplayType;
typedef void *EGLNativePixmapType;
typedef void *EGLNativeWindowType;
#elif defined(_WIN32) || defined(__VC32__) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__) /* Win32 and WinCE */
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN 1
#endif
#include <windows.h>
typedef HDC EGLNativeDisplayType;
typedef HBITMAP EGLNativePixmapType;
typedef HWND EGLNativeWindowType;
#elif defined(__EMSCRIPTEN__)
typedef int EGLNativeDisplayType;
typedef int EGLNativePixmapType;
typedef int EGLNativeWindowType;
#elif defined(__WINSCW__) || defined(__SYMBIAN32__) /* Symbian */
typedef int EGLNativeDisplayType;
typedef void *EGLNativePixmapType;
typedef void *EGLNativeWindowType;
#elif defined(WL_EGL_PLATFORM)
typedef struct wl_display *EGLNativeDisplayType;
typedef struct wl_egl_pixmap *EGLNativePixmapType;
typedef struct wl_egl_window *EGLNativeWindowType;
#elif defined(__GBM__)
typedef struct gbm_device *EGLNativeDisplayType;
typedef struct gbm_bo *EGLNativePixmapType;
typedef void *EGLNativeWindowType;
#elif defined(__ANDROID__) || defined(ANDROID)
struct ANativeWindow;
struct egl_native_pixmap_t;
typedef void *EGLNativeDisplayType;
typedef struct egl_native_pixmap_t *EGLNativePixmapType;
typedef struct ANativeWindow *EGLNativeWindowType;
#elif defined(USE_OZONE)
typedef intptr_t EGLNativeDisplayType;
typedef intptr_t EGLNativePixmapType;
typedef intptr_t EGLNativeWindowType;
#elif defined(__unix__) && defined(EGL_NO_X11)
typedef void *EGLNativeDisplayType;
typedef khronos_uintptr_t EGLNativePixmapType;
typedef khronos_uintptr_t EGLNativeWindowType;
#elif defined(__unix__) || defined(USE_X11)
/* X11 (tentative) */
#include <X11/Xlib.h>
#include <X11/Xutil.h>
typedef Display *EGLNativeDisplayType;
typedef Pixmap EGLNativePixmapType;
typedef Window EGLNativeWindowType;
#elif defined(__APPLE__)
typedef int EGLNativeDisplayType;
typedef void *EGLNativePixmapType;
typedef void *EGLNativeWindowType;
#elif defined(__HAIKU__)
#include <kernel/image.h>
typedef void *EGLNativeDisplayType;
typedef khronos_uintptr_t EGLNativePixmapType;
typedef khronos_uintptr_t EGLNativeWindowType;
#elif defined(__Fuchsia__)
typedef void *EGLNativeDisplayType;
typedef khronos_uintptr_t EGLNativePixmapType;
typedef khronos_uintptr_t EGLNativeWindowType;
#else
#error "Platform not recognized"
#endif
/* EGL 1.2 types, renamed for consistency in EGL 1.3 */
typedef EGLNativeDisplayType NativeDisplayType;
typedef EGLNativePixmapType NativePixmapType;
typedef EGLNativeWindowType NativeWindowType;
/* Define EGLint. This must be a signed integral type large enough to contain
* all legal attribute names and values passed into and out of EGL, whether
* their type is boolean, bitmask, enumerant (symbolic constant), integer,
* handle, or other. While in general a 32-bit integer will suffice, if
* handles are 64 bit types, then EGLint should be defined as a signed 64-bit
* integer type.
*/
typedef khronos_int32_t EGLint;
/* C++ / C typecast macros for special EGL handle values */
#if defined(__cplusplus)
#define EGL_CAST(type, value) (static_cast<type>(value))
#else
#define EGL_CAST(type, value) ((type) (value))
#endif
#endif /* __eglplatform_h */
| 4,954
|
C++
|
.h
| 126
| 37.531746
| 118
| 0.799499
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| true
| false
| false
| false
| false
| false
| false
|
4,101
|
NvFBC.h
|
LizardByte_Sunshine/third-party/nvfbc/NvFBC.h
|
/*!
* \file
*
* This file contains the interface constants, structure definitions and
* function prototypes defining the NvFBC API for Linux.
*
* Copyright (c) 2013-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVFBC_H_
#define _NVFBC_H_
#include <stdint.h>
/*!
* \mainpage NVIDIA Framebuffer Capture (NvFBC) for Linux.
*
* NvFBC is a high performance, low latency API to capture the framebuffer of
* an X server screen.
*
* The output from NvFBC captures everything that would be visible if we were
* directly looking at the monitor. This includes window manager decoration,
* mouse cursor, overlay, etc.
*
* It is ideally suited to desktop or fullscreen application capture and
* remoting.
*/
/*!
* \defgroup FBC_REQ Requirements
*
* The following requirements are provided by the regular NVIDIA Display Driver
* package:
*
* - OpenGL core >= 4.2:
* Required. NvFBC relies on OpenGL to perform frame capture and
* post-processing.
*
* - Vulkan 1.1:
* Required.
*
* - libcuda.so.1 >= 5.5:
* Optional. Used for capture to video memory with CUDA interop.
*
* The following requirements must be installed separately depending on the
* Linux distribution being used:
*
* - XRandR extension >= 1.2:
* Optional. Used for RandR output tracking.
*
* - libX11-xcb.so.1 >= 1.2:
* Required. NvFBC uses a mix of Xlib and XCB. Xlib is needed to use GLX,
* XCB is needed to make NvFBC more resilient against X server terminations
* while a capture session is active.
*
* - libxcb.so.1 >= 1.3:
* Required. See above.
*
* - xorg-server >= 1.3:
* Optional. Required for push model to work properly.
*
* Note that all optional dependencies are dlopen()'d at runtime. Failure to
* load an optional library is not fatal.
*/
/*!
* \defgroup FBC_CHANGES ChangeLog
*
* NvFBC Linux API version 0.1
* - Initial BETA release.
*
* NvFBC Linux API version 0.2
* - Added 'bEnableMSE' field to NVFBC_H264_HW_ENC_CONFIG.
* - Added 'dwMSE' field to NVFBC_TOH264_GRAB_FRAME_PARAMS.
* - Added 'bEnableAQ' field to NVFBC_H264_HW_ENC_CONFIG.
* - Added 'NVFBC_H264_PRESET_LOSSLESS_HP' enum to NVFBC_H264_PRESET.
* - Added 'NVFBC_BUFFER_FORMAT_YUV444P' enum to NVFBC_BUFFER_FORMAT.
* - Added 'eInputBufferFormat' field to NVFBC_H264_HW_ENC_CONFIG.
* - Added '0' and '244' values for NVFBC_H264_HW_ENC_CONFIG::dwProfile.
*
* NvFBC Linux API version 0.3
* - Improved multi-threaded support by implementing an API locking mechanism.
* - Added 'nvFBCBindContext' API entry point.
* - Added 'nvFBCReleaseContext' API entry point.
*
* NvFBC Linux API version 1.0
* - Added codec agnostic interface for HW encoding.
* - Deprecated H.264 interface.
* - Added support for H.265/HEVC HW encoding.
*
* NvFBC Linux API version 1.1
* - Added 'nvFBCToHwGetCaps' API entry point.
* - Added 'dwDiffMapScalingFactor' field to NVFBC_TOSYS_SETUP_PARAMS.
*
* NvFBC Linux API version 1.2
* - Deprecated ToHwEnc interface.
* - Added ToGL interface that captures frames to an OpenGL texture in video
* memory.
* - Added 'bDisableAutoModesetRecovery' field to
* NVFBC_CREATE_CAPTURE_SESSION_PARAMS.
* - Added 'bExternallyManagedContext' field to NVFBC_CREATE_HANDLE_PARAMS.
*
* NvFBC Linux API version 1.3
* - Added NVFBC_BUFFER_FORMAT_RGBA
* - Added 'dwTimeoutMs' field to NVFBC_TOSYS_GRAB_FRAME_PARAMS,
* NVFBC_TOCUDA_GRAB_FRAME_PARAMS, and NVFBC_TOGL_GRAB_FRAME_PARAMS.
*
* NvFBC Linux API version 1.4
* - Clarified that NVFBC_BUFFER_FORMAT_{ARGB,RGB,RGBA} are byte-order formats.
* - Renamed NVFBC_BUFFER_FORMAT_YUV420P to NVFBC_BUFFER_FORMAT_NV12.
* - Added new requirements.
* - Made NvFBC more resilient against the X server terminating during an active
* capture session. See new comments for ::NVFBC_ERR_X.
* - Relaxed requirement that 'frameSize' must have a width being a multiple of
* 4 and a height being a multiple of 2.
* - Added 'bRoundFrameSize' field to NVFBC_CREATE_CAPTURE_SESSION_PARAMS.
* - Relaxed requirement that the scaling factor for differential maps must be
* a multiple of the size of the frame.
* - Added 'diffMapSize' field to NVFBC_TOSYS_SETUP_PARAMS and
* NVFBC_TOGL_SETUP_PARAMS.
*
* NvFBC Linux API version 1.5
* - Added NVFBC_BUFFER_FORMAT_BGRA
*
* NvFBC Linux API version 1.6
* - Added the 'NVFBC_TOSYS_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY',
* 'NVFBC_TOCUDA_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY', and
* 'NVFBC_TOGL_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY' capture flags.
* - Exposed debug and performance logs through the NVFBC_LOG_LEVEL environment
* variable. Setting it to "1" enables performance logs, setting it to "2"
* enables debugging logs, setting it to "3" enables both.
* - Logs are printed to stdout or to the file pointed by the NVFBC_LOG_FILE
* environment variable.
* - Added 'ulTimestampUs' to NVFBC_FRAME_GRAB_INFO.
* - Added 'dwSamplingRateMs' to NVFBC_CREATE_CAPTURE_SESSION_PARAMS.
* - Added 'bPushModel' to NVFBC_CREATE_CAPTURE_SESSION_PARAMS.
*
* NvFBC Linux API version 1.7
* - Retired the NVFBC_CAPTURE_TO_HW_ENCODER interface.
* This interface has been deprecated since NvFBC 1.2 and has received no
* updates or new features since. We recommend using the NVIDIA Video Codec
* SDK to encode NvFBC frames.
* See: https://developer.nvidia.com/nvidia-video-codec-sdk
* - Added a 'Capture Modes' section to those headers.
* - Added a 'Post Processing' section to those headers.
* - Added an 'Environment Variables' section to those headers.
* - Added 'bInModeset' to NVFBC_GET_STATUS_PARAMS.
* - Added 'bAllowDirectCapture' to NVFBC_CREATE_CAPTURE_SESSION_PARAMS.
* - Added 'bDirectCaptured' to NVFBC_FRAME_GRAB_INFO.
* - Added 'bRequiredPostProcessing' to NVFBC_FRAME_GRAB_INFO.
*/
/*!
* \defgroup FBC_MODES Capture Modes
*
* When creating a capture session, NvFBC instantiates a capture subsystem
* living in the NVIDIA X driver.
*
* This subsystem listens for damage events coming from applications then
* generates (composites) frames for NvFBC when new content is available.
*
* This capture server can operate on a timer where it periodically checks if
* there are any pending damage events, or it can generate frames as soon as it
* receives a new damage event.
* See NVFBC_CREATE_CAPTURE_SESSION_PARAMS::dwSamplingRateMs,
* and NVFBC_CREATE_CAPTURE_SESSION_PARAMS::bPushModel.
*
* NvFBC can also attach itself to a fullscreen unoccluded application and have
* it copy its frames directly into a buffer owned by NvFBC upon present. This
* mode bypasses the X server.
* See NVFBC_CREATE_CAPTURE_SESSION_PARAMS::bAllowDirectCapture.
*
* NvFBC is designed to capture frames with as few copies as possible. The
* NVIDIA X driver composites frames directly into the NvFBC buffers, and
* direct capture copies frames directly into these buffers as well.
*
* Depending on the configuration of a capture session, an extra copy (rendering
* pass) may be needed. See the 'Post Processing' section.
*/
/*!
* \defgroup FBC_PP Post Processing
*
* Depending on the configuration of a capture session, NvFBC might require to
* do post processing on frames.
*
* Post processing is required for the following reasons:
* - NvFBC needs to do a pixel format conversion.
* - Diffmaps are requested.
* - Capture to system memory is requested.
*
* NvFBC needs to do a conversion if the requested pixel format does not match
* the native format. The native format is NVFBC_BUFFER_FORMAT_BGRA.
*
* Note: post processing is *not* required for frame scaling and frame cropping.
*
* Skipping post processing can reduce capture latency. An application can know
* whether post processing was required by checking
* NVFBC_FRAME_GRAB_INFO::bRequiredPostProcessing.
*/
/*!
* \defgroup FBC_ENVVAR Environment Variables
*
* Below are the environment variables supported by NvFBC:
*
* - NVFBC_LOG_LEVEL
* Bitfield where the first bit enables debug logs and the second bit enables
* performance logs. Both can be enabled by setting this envvar to 3.
*
* - NVFBC_LOG_FILE
* Write all NvFBC logs to the given file.
*
* - NVFBC_FORCE_ALLOW_DIRECT_CAPTURE
* Used to override NVFBC_CREATE_CAPTURE_SESSION_PARAMS::bAllowDirectCapture.
*
* - NVFBC_FORCE_POST_PROCESSING
* Used to force the post processing step, even if it could be skipped.
* See the 'Post Processing' section.
*/
/*!
* \defgroup FBC_STRUCT Structure Definition
*
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/*!
* Calling convention.
*
* Empty on Linux; present so declarations stay source-compatible with
* platforms that need an explicit calling convention.
*/
#define NVFBCAPI
/*!
* NvFBC API major version.
*/
#define NVFBC_VERSION_MAJOR 1
/*!
* NvFBC API minor version.
*/
#define NVFBC_VERSION_MINOR 7
/*!
* NvFBC API version.
*
* Packed as: minor version in bits 0-7, major version in bits 8-15
* (e.g., API 1.7 yields 0x0107).
*/
#define NVFBC_VERSION (uint32_t)(NVFBC_VERSION_MINOR | (NVFBC_VERSION_MAJOR << 8))
/*!
* Creates a version number for structure parameters.
*
* Encodes sizeof(typeName) in the low 16 bits, the structure version 'ver'
* in bits 16-23, and NVFBC_VERSION in the top bits. The library uses this
* to validate that the client was compiled against a compatible struct
* layout.
*/
#define NVFBC_STRUCT_VERSION(typeName, ver) \
(uint32_t)(sizeof(typeName) | ((ver) << 16) | (NVFBC_VERSION << 24))
/*!
* Defines error codes.
*
* \see NvFBCGetLastErrorStr
*/
typedef enum _NVFBCSTATUS {
/*!
* This indicates that the API call returned with no errors.
*/
NVFBC_SUCCESS = 0,
/*!
* This indicates that the API version between the client and the library
* is not compatible.
*/
NVFBC_ERR_API_VERSION = 1,
/*!
* An internal error occurred.
*/
NVFBC_ERR_INTERNAL = 2,
/*!
* This indicates that one or more of the parameters passed to the API call
* are invalid.
*/
NVFBC_ERR_INVALID_PARAM = 3,
/*!
* This indicates that one or more of the pointers passed to the API call
* are invalid.
*/
NVFBC_ERR_INVALID_PTR = 4,
/*!
* This indicates that the handle passed to the API call to identify the
* client is invalid.
*/
NVFBC_ERR_INVALID_HANDLE = 5,
/*!
* This indicates that the maximum number of threaded clients of the same
* process has been reached. The limit is 10 threads per process.
* There is no limit on the number of processes.
*/
NVFBC_ERR_MAX_CLIENTS = 6,
/*!
* This indicates that the requested feature is not currently supported
* by the library.
*/
NVFBC_ERR_UNSUPPORTED = 7,
/*!
* This indicates that the API call failed because it was unable to allocate
* enough memory to perform the requested operation.
*/
NVFBC_ERR_OUT_OF_MEMORY = 8,
/*!
* This indicates that the API call was not expected. This happens when
* API calls are performed in a wrong order, such as trying to capture
* a frame prior to creating a new capture session; or trying to set up
* a capture to video memory although a capture session to system memory
* was created.
*/
NVFBC_ERR_BAD_REQUEST = 9,
/*!
* This indicates an X error, most likely meaning that the X server has
* been terminated. When this error is returned, the only resort is to
* create another FBC handle using NvFBCCreateHandle().
*
* The previous handle should still be freed with NvFBCDestroyHandle(), but
* it might leak resources, in particular X, GLX, and GL resources since
* it is no longer possible to communicate with an X server to free them
* through the driver.
*
* The best course of action to eliminate this potential leak is to close
* the OpenGL driver, close the forked process running the capture, or
* restart the application.
*/
NVFBC_ERR_X = 10,
/*!
* This indicates a GLX error.
*/
NVFBC_ERR_GLX = 11,
/*!
* This indicates an OpenGL error.
*/
NVFBC_ERR_GL = 12,
/*!
* This indicates a CUDA error.
*/
NVFBC_ERR_CUDA = 13,
/*!
* This indicates a HW encoder error.
*/
NVFBC_ERR_ENCODER = 14,
/*!
* This indicates an NvFBC context error.
*/
NVFBC_ERR_CONTEXT = 15,
/*!
* This indicates that the application must recreate the capture session.
*
* This error can be returned if a modeset event occurred while capturing
* frames, and NVFBC_CREATE_HANDLE_PARAMS::bDisableAutoModesetRecovery
* was set to NVFBC_TRUE.
*/
NVFBC_ERR_MUST_RECREATE = 16,
/*!
* This indicates a Vulkan error.
*/
NVFBC_ERR_VULKAN = 17,
} NVFBCSTATUS;
/*!
* Defines boolean values.
*/
typedef enum _NVFBC_BOOL {
/*!
* False value.
*/
NVFBC_FALSE = 0,
/*!
* True value (evaluates to 1, the next enumerator value).
*/
NVFBC_TRUE,
} NVFBC_BOOL;
/*!
* Maximum size in bytes of an error string.
*
* \see NvFBCGetLastErrorStr
*/
#define NVFBC_ERR_STR_LEN 512
/*!
* Capture type.
*/
typedef enum _NVFBC_CAPTURE_TYPE {
/*!
* Capture frames to a buffer in system memory.
*/
NVFBC_CAPTURE_TO_SYS = 0,
/*!
* Capture frames to a CUDA device in video memory.
*
* Specifying this will dlopen() libcuda.so.1 and fail if not available.
*/
NVFBC_CAPTURE_SHARED_CUDA,
/*!
* Retired. Do not use.
*/
/* NVFBC_CAPTURE_TO_HW_ENCODER, */
/*!
* Capture frames to an OpenGL buffer in video memory.
*
* Explicitly assigned 3 to skip over the retired enumerator above and keep
* ABI compatibility with older headers.
*/
NVFBC_CAPTURE_TO_GL = 3,
} NVFBC_CAPTURE_TYPE;
/*!
* Tracking type.
*
* NvFBC can track a specific region of the framebuffer to capture.
*
* An X screen corresponds to the entire framebuffer.
*
* An RandR CRTC is a component of the GPU that reads pixels from a region of
* the X screen and sends them through a pipeline to an RandR output.
* A physical monitor can be connected to an RandR output. Tracking an RandR
* output captures the region of the X screen that the RandR CRTC is sending to
* the RandR output.
*/
typedef enum {
/*!
* By default, NvFBC tries to track a connected primary output. If none is
* found, then it tries to track the first connected output. If none is
* found then it tracks the entire X screen.
*
* If the XRandR extension is not available, this option has the same effect
* as ::NVFBC_TRACKING_SCREEN.
*
* This default behavior might be subject to changes in the future.
*/
NVFBC_TRACKING_DEFAULT = 0,
/*!
* Track an RandR output specified by its ID in the appropriate field.
*
* The list of connected outputs can be queried via NvFBCGetStatus().
* This list can also be obtained using e.g., xrandr(1).
*
* If the XRandR extension is not available, setting this option returns an
* error.
*/
NVFBC_TRACKING_OUTPUT,
/*!
* Track the entire X screen.
*/
NVFBC_TRACKING_SCREEN,
} NVFBC_TRACKING_TYPE;
/*!
* Buffer format.
*/
typedef enum _NVFBC_BUFFER_FORMAT {
/*!
* Data will be converted to ARGB8888 byte-order format. 32 bpp.
*/
NVFBC_BUFFER_FORMAT_ARGB = 0,
/*!
* Data will be converted to RGB888 byte-order format. 24 bpp.
*/
NVFBC_BUFFER_FORMAT_RGB,
/*!
* Data will be converted to NV12 format using HDTV weights
* according to ITU-R BT.709. 12 bpp.
*/
NVFBC_BUFFER_FORMAT_NV12,
/*!
* Data will be converted to YUV 444 planar format using HDTV weights
* according to ITU-R BT.709. 24 bpp.
*/
NVFBC_BUFFER_FORMAT_YUV444P,
/*!
* Data will be converted to RGBA8888 byte-order format. 32 bpp.
*/
NVFBC_BUFFER_FORMAT_RGBA,
/*!
* Native format. No pixel conversion needed.
* BGRA8888 byte-order format. 32 bpp.
*/
NVFBC_BUFFER_FORMAT_BGRA,
} NVFBC_BUFFER_FORMAT;
/*
* Alias: requests for "YUV420P" are fulfilled with the NV12 layout (see
* NVFBC_BUFFER_FORMAT_NV12). NOTE(review): presumably kept for backward
* compatibility with clients written against older headers — confirm.
*/
#define NVFBC_BUFFER_FORMAT_YUV420P NVFBC_BUFFER_FORMAT_NV12
/*!
* Handle used to identify an NvFBC session.
*
* Created with NvFBCCreateHandle() and freed with NvFBCDestroyHandle().
*/
typedef uint64_t NVFBC_SESSION_HANDLE;
/*!
* Box used to describe an area of the tracked region to capture.
*
* The coordinates are relative to the tracked region.
*
* E.g., if the size of the X screen is 3520x1200 and the tracked RandR output
* scans a region of 1600x1200+1920+0, then setting a capture box of
* 800x600+100+50 effectively captures a region of 800x600+2020+50 relative to
* the X screen.
*/
typedef struct _NVFBC_BOX {
/*!
* [in] X offset of the box, in pixels.
*/
uint32_t x;
/*!
* [in] Y offset of the box, in pixels.
*/
uint32_t y;
/*!
* [in] Width of the box, in pixels.
*/
uint32_t w;
/*!
* [in] Height of the box, in pixels.
*/
uint32_t h;
} NVFBC_BOX;
/*!
* Size used to describe the size of a frame.
*/
typedef struct _NVFBC_SIZE {
/*!
* [in] Width, in pixels.
*/
uint32_t w;
/*!
* [in] Height, in pixels.
*/
uint32_t h;
} NVFBC_SIZE;
/*!
* Describes information about a captured frame.
*/
typedef struct _NVFBC_FRAME_GRAB_INFO {
/*!
* [out] Width of the captured frame.
*/
uint32_t dwWidth;
/*!
* [out] Height of the captured frame.
*/
uint32_t dwHeight;
/*!
* [out] Size of the frame in bytes.
*/
uint32_t dwByteSize;
/*!
* [out] Incremental ID of the current frame.
*
* This can be used to identify a frame.
*/
uint32_t dwCurrentFrame;
/*!
* [out] Whether the captured frame is a new frame.
*
* When using non blocking calls it is possible to capture a frame
* that was already captured before if the display server did not
* render a new frame in the meantime. In that case, this flag
* will be set to NVFBC_FALSE.
*
* When using blocking calls each captured frame will have
* this flag set to NVFBC_TRUE since the blocking mechanism waits for
* the display server to render a new frame.
*
* Note that this flag does not guarantee that the content of
* the frame will be different compared to the previous captured frame.
*
* In particular, some compositing managers report the entire
* framebuffer as damaged when an application refreshes its content.
*
* Consider a single X screen spanned across physical displays A and B
* and an NvFBC application tracking display A. Depending on the
* compositing manager, it is possible that an application refreshing
* itself on display B will trigger a frame capture on display A.
*
* Workarounds include:
* - Using separate X screens
* - Disabling the composite extension
* - Using a compositing manager that properly reports what regions
* are damaged
* - Using NvFBC's diffmaps to find out if the frame changed
*/
NVFBC_BOOL bIsNewFrame;
/*!
* [out] Frame timestamp
*
* Time in microseconds when the display server started rendering the
* frame.
*
* This does not account for when the frame was captured. If capturing an
* old frame (e.g., bIsNewFrame is NVFBC_FALSE) the reported timestamp
* will reflect the time when the old frame was rendered by the display
* server.
*/
uint64_t ulTimestampUs;
/*!
* [out] Number of frames generated since the last capture.
*
* This can help applications tell whether they missed frames or there
* were no frames generated by the server since the last capture.
*/
uint32_t dwMissedFrames;
/*!
* [out] Whether the captured frame required post processing.
*
* See the 'Post Processing' section.
*/
NVFBC_BOOL bRequiredPostProcessing;
/*!
* [out] Whether this frame was obtained via direct capture.
*
* See NVFBC_CREATE_CAPTURE_SESSION_PARAMS::bAllowDirectCapture.
*/
NVFBC_BOOL bDirectCapture;
} NVFBC_FRAME_GRAB_INFO;
/*!
* Defines parameters for the CreateHandle() API call.
*/
typedef struct _NVFBC_CREATE_HANDLE_PARAMS {
/*!
* [in] Must be set to NVFBC_CREATE_HANDLE_PARAMS_VER
*/
uint32_t dwVersion;
/*!
* [in] Application specific private information passed to the NvFBC
* session.
*/
const void *privateData;
/*!
* [in] Size of the application specific private information passed to the
* NvFBC session.
*/
uint32_t privateDataSize;
/*!
* [in] Whether NvFBC should not create and manage its own graphics context
*
* NvFBC internally uses OpenGL to perform graphics operations on the
* captured frames. By default, NvFBC will create and manage (e.g., make
* current, detect new threads, etc.) its own OpenGL context.
*
* If set to NVFBC_TRUE, NvFBC will use the application's context. It will
* be the application's responsibility to make sure that a context is
* current on the thread calling into the NvFBC API.
*/
NVFBC_BOOL bExternallyManagedContext;
/*!
* [in] GLX context
*
* GLX context that NvFBC should use internally to create pixmaps and
* make them current when creating a new capture session.
*
* Note: NvFBC expects a context created against a GLX_RGBA_TYPE render
* type.
*/
void *glxCtx;
/*!
* [in] GLX framebuffer configuration
*
* Framebuffer configuration that was used to create the GLX context, and
* that will be used to create pixmaps internally.
*
* Note: NvFBC expects a configuration having at least the following
* attributes:
* GLX_DRAWABLE_TYPE, GLX_PIXMAP_BIT
* GLX_BIND_TO_TEXTURE_RGBA_EXT, 1
* GLX_BIND_TO_TEXTURE_TARGETS_EXT, GLX_TEXTURE_2D_BIT_EXT
*/
void *glxFBConfig;
} NVFBC_CREATE_HANDLE_PARAMS;
/*!
* NVFBC_CREATE_HANDLE_PARAMS structure version.
*/
#define NVFBC_CREATE_HANDLE_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_CREATE_HANDLE_PARAMS, 2)
/*!
* Defines parameters for the ::NvFBCDestroyHandle() API call.
*/
typedef struct _NVFBC_DESTROY_HANDLE_PARAMS {
/*!
* [in] Must be set to NVFBC_DESTROY_HANDLE_PARAMS_VER
*/
uint32_t dwVersion;
} NVFBC_DESTROY_HANDLE_PARAMS;
/*!
* NVFBC_DESTROY_HANDLE_PARAMS structure version.
*/
#define NVFBC_DESTROY_HANDLE_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_DESTROY_HANDLE_PARAMS, 1)
/*!
* Maximum number of connected RandR outputs to an X screen.
*/
#define NVFBC_OUTPUT_MAX 5
/*!
* Maximum size in bytes of an RandR output name.
*/
#define NVFBC_OUTPUT_NAME_LEN 128
/*!
* Describes an RandR output.
*
* Filling this structure relies on the XRandR extension. This feature cannot
* be used if the extension is missing or its version is below the requirements.
*
* \see Requirements
*/
typedef struct _NVFBC_OUTPUT {
/*!
* Identifier of the RandR output.
*/
uint32_t dwId;
/*!
* Name of the RandR output, as reported by tools such as xrandr(1).
*
* Example: "DVI-I-0"
*/
char name[NVFBC_OUTPUT_NAME_LEN];
/*!
* Region of the X screen tracked by the RandR CRTC driving this RandR
* output.
*/
NVFBC_BOX trackedBox;
} NVFBC_RANDR_OUTPUT_INFO;
/*!
* Defines parameters for the ::NvFBCGetStatus() API call.
*/
typedef struct _NVFBC_GET_STATUS_PARAMS {
/*!
* [in] Must be set to NVFBC_GET_STATUS_PARAMS_VER
*/
uint32_t dwVersion;
/*!
* [out] Whether or not framebuffer capture is supported by the graphics
* driver.
*/
NVFBC_BOOL bIsCapturePossible;
/*!
* [out] Whether or not there is already a capture session on this system.
*/
NVFBC_BOOL bCurrentlyCapturing;
/*!
* [out] Whether or not it is possible to create a capture session on this
* system.
*/
NVFBC_BOOL bCanCreateNow;
/*!
* [out] Size of the X screen (framebuffer).
*/
NVFBC_SIZE screenSize;
/*!
* [out] Whether the XRandR extension is available.
*
* If this extension is not available then it is not possible to have
* information about RandR outputs.
*/
NVFBC_BOOL bXRandRAvailable;
/*!
* [out] Array of outputs connected to the X screen.
*
* An application can track a specific output by specifying its ID when
* creating a capture session.
*
* Only if XRandR is available.
*/
NVFBC_RANDR_OUTPUT_INFO outputs[NVFBC_OUTPUT_MAX];
/*!
* [out] Number of outputs connected to the X screen.
*
* This must be used to parse the array of connected outputs.
*
* Only if XRandR is available.
*/
uint32_t dwOutputNum;
/*!
* [out] Version of the NvFBC library running on this system.
*/
uint32_t dwNvFBCVersion;
/*!
* [out] Whether the X server is currently in modeset.
*
* When the X server is in modeset, it must give up all its video
* memory allocations. It is not possible to create a capture
* session until the modeset is over.
*
* Note that VT-switches are considered modesets.
*/
NVFBC_BOOL bInModeset;
} NVFBC_GET_STATUS_PARAMS;
/*!
* NVFBC_GET_STATUS_PARAMS structure version.
*/
#define NVFBC_GET_STATUS_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_GET_STATUS_PARAMS, 2)
/*!
* Defines parameters for the ::NvFBCCreateCaptureSession() API call.
*/
typedef struct _NVFBC_CREATE_CAPTURE_SESSION_PARAMS {
/*!
* [in] Must be set to NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER
*/
uint32_t dwVersion;
/*!
* [in] Desired capture type.
*
* Note that when specifying ::NVFBC_CAPTURE_SHARED_CUDA, NvFBC will try to
* dlopen() the corresponding libraries. This means that NvFBC can run on
* a system without the CUDA library since it does not link against them.
*/
NVFBC_CAPTURE_TYPE eCaptureType;
/*!
* [in] What region of the framebuffer should be tracked.
*/
NVFBC_TRACKING_TYPE eTrackingType;
/*!
* [in] ID of the output to track if eTrackingType is set to
* ::NVFBC_TRACKING_OUTPUT.
*/
uint32_t dwOutputId;
/*!
* [in] Crop the tracked region.
*
* The coordinates are relative to the tracked region.
*
* It can be set to 0 to capture the entire tracked region.
*/
NVFBC_BOX captureBox;
/*!
* [in] Desired size of the captured frame.
*
* This parameter allows scaling the captured frame.
*
* It can be set to 0 to disable frame resizing.
*/
NVFBC_SIZE frameSize;
/*!
* [in] Whether the mouse cursor should be composited to the frame.
*
* Disabling the cursor will not generate new frames when only the cursor
* is moved.
*/
NVFBC_BOOL bWithCursor;
/*!
* [in] Whether NvFBC should not attempt to recover from modesets.
*
* NvFBC is able to detect when a modeset event occurred and can automatically
* re-create a capture session with the same settings as before, then resume
* its frame capture session transparently.
*
* This option allows disabling this behavior. NVFBC_ERR_MUST_RECREATE
* will be returned in that case.
*
* It can be useful in the cases when an application needs to do some work
* between setting up a capture and grabbing the first frame.
*
* For example: an application using the ToGL interface needs to register
* resources with EncodeAPI prior to encoding frames.
*
* Note that during modeset recovery, NvFBC will try to re-create the
* capture session every second until it succeeds.
*/
NVFBC_BOOL bDisableAutoModesetRecovery;
/*!
* [in] Whether NvFBC should round the requested frameSize.
*
* When disabled, NvFBC will not attempt to round the requested resolution.
*
* However, some pixel formats have resolution requirements. E.g., YUV/NV
* formats must have a width being a multiple of 4, and a height being a
* multiple of 2. RGB formats don't have such requirements.
*
* If the resolution doesn't meet the requirements of the format, then NvFBC
* will fail at setup time.
*
* When enabled, NvFBC will round the requested width to the next multiple
* of 4 and the requested height to the next multiple of 2.
*
* In this case, requesting any resolution will always work with every
* format. However, an NvFBC client must be prepared to handle the case
* where the requested resolution is different than the captured resolution.
*
* NVFBC_FRAME_GRAB_INFO::dwWidth and NVFBC_FRAME_GRAB_INFO::dwHeight should
* always be used for getting information about captured frames.
*/
NVFBC_BOOL bRoundFrameSize;
/*!
* [in] Rate in ms at which the display server generates new frames
*
* This controls the frequency at which the display server will generate
* new frames if new content is available. This effectively controls the
* capture rate when using blocking calls.
*
* Note that lower values will increase the CPU and GPU loads.
*
* The default value is 16ms (~ 60 Hz).
*/
uint32_t dwSamplingRateMs;
/*!
* [in] Enable push model for frame capture
*
* When set to NVFBC_TRUE, the display server will generate frames whenever
* it receives a damage event from applications.
*
* Setting this to NVFBC_TRUE will ignore ::dwSamplingRateMs.
*
* Using push model with the NVFBC_*_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY
* capture flag should guarantee the shortest amount of time between an
* application rendering a frame and an NvFBC client capturing it, provided
* that the NvFBC client is able to process the frames quickly enough.
*
* Note that applications running at high frame rates will increase CPU and
* GPU loads.
*/
NVFBC_BOOL bPushModel;
/*!
* [in] Allow direct capture
*
* Direct capture allows NvFBC to attach itself to a fullscreen graphics
* application. Whenever that application presents a frame, it makes a copy
* of it directly into a buffer owned by NvFBC thus bypassing the X server.
*
* When direct capture is *not* enabled, the NVIDIA X driver generates a
* frame for NvFBC when it receives a damage event from an application if push
* model is enabled, or periodically checks if there are any pending damage
* events otherwise (see NVFBC_CREATE_CAPTURE_SESSION_PARAMS::dwSamplingRateMs).
*
* Direct capture is possible under the following conditions:
* - Direct capture is allowed
* - Push model is enabled (see NVFBC_CREATE_CAPTURE_SESSION_PARAMS::bPushModel)
* - The mouse cursor is not composited (see NVFBC_CREATE_CAPTURE_SESSION_PARAMS::bWithCursor)
* - No viewport transformation is required. This happens when the remote
* desktop is e.g. rotated.
*
* When direct capture is possible, NvFBC will automatically attach itself
* to a fullscreen unoccluded application, if such exists.
*
* Notes:
* - This includes compositing desktops such as GNOME (e.g., gnome-shell
* is the fullscreen unoccluded application).
* - There can be only one fullscreen unoccluded application at a time.
* - The NVIDIA X driver monitors which application qualifies or no
* longer qualifies.
*
* For example, if a fullscreen application is launched in GNOME, NvFBC will
* detach from gnome-shell and attach to that application.
*
* Attaching and detaching happens automatically from the perspective of an
* NvFBC client. When detaching from an application, the X driver will
* transparently resume generating frames for NvFBC.
*
* An application can know whether a given frame was obtained through
* direct capture by checking NVFBC_FRAME_GRAB_INFO::bDirectCapture.
*/
NVFBC_BOOL bAllowDirectCapture;
} NVFBC_CREATE_CAPTURE_SESSION_PARAMS;
/*!
* NVFBC_CREATE_CAPTURE_SESSION_PARAMS structure version.
*/
#define NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_CREATE_CAPTURE_SESSION_PARAMS, 6)
/*!
* Defines parameters for the ::NvFBCDestroyCaptureSession() API call.
*/
typedef struct _NVFBC_DESTROY_CAPTURE_SESSION_PARAMS {
/*!
* [in] Must be set to NVFBC_DESTROY_CAPTURE_SESSION_PARAMS_VER
*/
uint32_t dwVersion;
} NVFBC_DESTROY_CAPTURE_SESSION_PARAMS;
/*!
* NVFBC_DESTROY_CAPTURE_SESSION_PARAMS structure version.
*/
#define NVFBC_DESTROY_CAPTURE_SESSION_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_DESTROY_CAPTURE_SESSION_PARAMS, 1)
/*!
* Defines parameters for the ::NvFBCBindContext() API call.
*
* NOTE(review): bind/release appear related to handing the NvFBC context
* between threads or to externally managed GL contexts (see
* NVFBC_CREATE_HANDLE_PARAMS::bExternallyManagedContext) — confirm against
* the NvFBC SDK documentation.
*/
typedef struct _NVFBC_BIND_CONTEXT_PARAMS {
/*!
* [in] Must be set to NVFBC_BIND_CONTEXT_PARAMS_VER
*/
uint32_t dwVersion;
} NVFBC_BIND_CONTEXT_PARAMS;
/*!
* NVFBC_BIND_CONTEXT_PARAMS structure version.
*/
#define NVFBC_BIND_CONTEXT_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_BIND_CONTEXT_PARAMS, 1)
/*!
* Defines parameters for the ::NvFBCReleaseContext() API call.
*/
typedef struct _NVFBC_RELEASE_CONTEXT_PARAMS {
/*!
* [in] Must be set to NVFBC_RELEASE_CONTEXT_PARAMS_VER
*/
uint32_t dwVersion;
} NVFBC_RELEASE_CONTEXT_PARAMS;
/*!
* NVFBC_RELEASE_CONTEXT_PARAMS structure version.
*/
#define NVFBC_RELEASE_CONTEXT_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_RELEASE_CONTEXT_PARAMS, 1)
/*!
* Defines flags that can be used when capturing to system memory.
*/
typedef enum {
/*!
* Default, capturing waits for a new frame or mouse move.
*
* The default behavior of blocking grabs is to wait for a new frame until
* after the call was made. But it's possible that there is a frame already
* ready that the client hasn't seen.
* \see NVFBC_TOSYS_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY
*/
NVFBC_TOSYS_GRAB_FLAGS_NOFLAGS = 0,
/*!
* Capturing does not wait for a new frame nor a mouse move.
*
* It is therefore possible to capture the same frame multiple times.
* When this occurs, the dwCurrentFrame parameter of the
* NVFBC_FRAME_GRAB_INFO structure is not incremented.
*/
NVFBC_TOSYS_GRAB_FLAGS_NOWAIT = (1 << 0),
/*!
* Forces the destination buffer to be refreshed even if the frame has not
* changed since previous capture.
*
* By default, if the captured frame is identical to the previous one, NvFBC
* will omit one copy and not update the destination buffer.
*
* Setting that flag will prevent this behavior. This can be useful e.g.,
* if the application has modified the buffer in the meantime.
*/
NVFBC_TOSYS_GRAB_FLAGS_FORCE_REFRESH = (1 << 1),
/*!
* Similar to NVFBC_TOSYS_GRAB_FLAGS_NOFLAGS, except that the capture will
* not wait if there is already a frame available that the client has
* never seen yet.
*/
NVFBC_TOSYS_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY = (1 << 2),
} NVFBC_TOSYS_GRAB_FLAGS;
/*!
* Defines parameters for the ::NvFBCToSysSetUp() API call.
*/
typedef struct _NVFBC_TOSYS_SETUP_PARAMS {
/*!
* [in] Must be set to NVFBC_TOSYS_SETUP_PARAMS_VER
*/
uint32_t dwVersion;
/*!
* [in] Desired buffer format.
*/
NVFBC_BUFFER_FORMAT eBufferFormat;
/*!
* [out] Pointer to a pointer to a buffer in system memory.
*
* This buffer contains the pixel value of the requested format. Refer to
* the description of the buffer formats to understand the memory layout.
*
* The application does not need to allocate memory for this buffer. It
* should not free this buffer either. This buffer is automatically
* re-allocated when needed (e.g., when the resolution changes).
*
* This buffer is allocated by the NvFBC library to the proper size. This
* size is returned in the dwByteSize field of the
* ::NVFBC_FRAME_GRAB_INFO structure.
*/
void **ppBuffer;
/*!
* [in] Whether differential maps should be generated.
*/
NVFBC_BOOL bWithDiffMap;
/*!
* [out] Pointer to a pointer to a buffer in system memory.
*
* This buffer contains the differential map of two frames. It must be read
* as an array of unsigned char. Each unsigned char is either 0 or
* non-zero. 0 means that the pixel value at the given location has not
* changed since the previous captured frame. Non-zero means that the pixel
* value has changed.
*
* The application does not need to allocate memory for this buffer. It
* should not free this buffer either. This buffer is automatically
* re-allocated when needed (e.g., when the resolution changes).
*
* This buffer is allocated by the NvFBC library to the proper size. The
* size of the differential map is returned in ::diffMapSize.
*
* This option is not compatible with the ::NVFBC_BUFFER_FORMAT_YUV420P and
* ::NVFBC_BUFFER_FORMAT_YUV444P buffer formats.
*/
void **ppDiffMap;
/*!
* [in] Scaling factor of the differential maps.
*
* For example, a scaling factor of 16 means that one pixel of the diffmap
* will represent 16x16 pixels of the original frames.
*
* If any of these 16x16 pixels is different between the current and the
* previous frame, then the corresponding pixel in the diffmap will be set
* to non-zero.
*
* The default scaling factor is 1. A dwDiffMapScalingFactor of 0 will be
* set to 1.
*/
uint32_t dwDiffMapScalingFactor;
/*!
* [out] Size of the differential map.
*
* Only set if bWithDiffMap is set to NVFBC_TRUE.
*/
NVFBC_SIZE diffMapSize;
} NVFBC_TOSYS_SETUP_PARAMS;
/*!
* NVFBC_TOSYS_SETUP_PARAMS structure version.
*/
#define NVFBC_TOSYS_SETUP_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_TOSYS_SETUP_PARAMS, 3)
/*!
* Defines parameters for the ::NvFBCToSysGrabFrame() API call.
*/
typedef struct _NVFBC_TOSYS_GRAB_FRAME_PARAMS {
/*!
* [in] Must be set to NVFBC_TOSYS_GRAB_FRAME_PARAMS_VER
*/
uint32_t dwVersion;
/*!
* [in] Flags defining the behavior of this frame capture.
*/
uint32_t dwFlags;
/*!
* [out] Information about the captured frame.
*
* Can be NULL.
*/
NVFBC_FRAME_GRAB_INFO *pFrameGrabInfo;
/*!
* [in] Wait timeout in milliseconds.
*
* When capturing frames with the NVFBC_TOSYS_GRAB_FLAGS_NOFLAGS or
* NVFBC_TOSYS_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY flags,
* NvFBC will wait for a new frame or mouse move until the below timer
* expires.
*
* When timing out, the last captured frame will be returned. Note that as
* long as the NVFBC_TOSYS_GRAB_FLAGS_FORCE_REFRESH flag is not set,
* returning an old frame will incur no performance penalty.
*
* NvFBC clients can use the return value of the grab frame operation to
* find out whether a new frame was captured, or the timer expired.
*
* Note that the behavior of blocking calls is to wait for a new frame
* *after* the call has been made. When using timeouts, it is possible
* that NvFBC will return a new frame (e.g., it has never been captured
* before) even though no new frame was generated after the grab call.
*
* For the precise definition of what constitutes a new frame, see
* ::bIsNewFrame.
*
* Set to 0 to disable timeouts.
*/
uint32_t dwTimeoutMs;
} NVFBC_TOSYS_GRAB_FRAME_PARAMS;
/*!
* NVFBC_TOSYS_GRAB_FRAME_PARAMS structure version.
*/
#define NVFBC_TOSYS_GRAB_FRAME_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_TOSYS_GRAB_FRAME_PARAMS, 2)
/*!
* Defines flags that can be used when capturing to a CUDA buffer in video memory.
*/
typedef enum {
/*!
* Default, capturing waits for a new frame or mouse move.
*
* The default behavior of blocking grabs is to wait for a new frame until
* after the call was made. But it's possible that there is a frame already
* ready that the client hasn't seen.
* \see NVFBC_TOCUDA_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY
*/
NVFBC_TOCUDA_GRAB_FLAGS_NOFLAGS = 0,
/*!
* Capturing does not wait for a new frame nor a mouse move.
*
* It is therefore possible to capture the same frame multiple times.
* When this occurs, the dwCurrentFrame parameter of the
* NVFBC_FRAME_GRAB_INFO structure is not incremented.
*/
NVFBC_TOCUDA_GRAB_FLAGS_NOWAIT = (1 << 0),
/*!
* Forces the destination buffer to be refreshed even if the frame
* has not changed since previous capture.
*
* By default, if the captured frame is identical to the previous one, NvFBC
* will omit one copy and not update the destination buffer.
*
* Setting that flag will prevent this behavior. This can be useful e.g.,
* if the application has modified the buffer in the meantime.
*/
NVFBC_TOCUDA_GRAB_FLAGS_FORCE_REFRESH = (1 << 1),
/*!
* Similar to NVFBC_TOCUDA_GRAB_FLAGS_NOFLAGS, except that the capture will
* not wait if there is already a frame available that the client has
* never seen yet.
*/
NVFBC_TOCUDA_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY = (1 << 2),
} NVFBC_TOCUDA_FLAGS;
/*!
* Defines parameters for the ::NvFBCToCudaSetUp() API call.
*
* The capture session must have been created with a matching capture type
* (::NVFBC_CAPTURE_SHARED_CUDA); otherwise the setup/grab calls return
* ::NVFBC_ERR_BAD_REQUEST.
*/
typedef struct _NVFBC_TOCUDA_SETUP_PARAMS {
/*!
* [in] Must be set to NVFBC_TOCUDA_SETUP_PARAMS_VER
*/
uint32_t dwVersion;
/*!
* [in] Desired buffer format.
*/
NVFBC_BUFFER_FORMAT eBufferFormat;
} NVFBC_TOCUDA_SETUP_PARAMS;
/*!
* NVFBC_TOCUDA_SETUP_PARAMS structure version.
*/
#define NVFBC_TOCUDA_SETUP_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_TOCUDA_SETUP_PARAMS, 1)
/*!
* Defines parameters for the ::NvFBCToCudaGrabFrame() API call.
*/
typedef struct _NVFBC_TOCUDA_GRAB_FRAME_PARAMS {
/*!
* [in] Must be set to NVFBC_TOCUDA_GRAB_FRAME_PARAMS_VER.
*/
uint32_t dwVersion;
/*!
* [in] Flags defining the behavior of this frame capture.
*/
uint32_t dwFlags;
/*!
* [out] Pointer to a ::CUdeviceptr
*
* The application does not need to allocate memory for this CUDA device.
*
* The application does need to create its own CUDA context to use this
* CUDA device.
*
* This ::CUdeviceptr will be mapped to a segment in video memory containing
* the frame. It is not possible to process a CUDA device while capturing
* a new frame. If the application wants to do so, it must copy the CUDA
* device using ::cuMemcpyDtoD or ::cuMemcpyDtoH beforehand.
*/
void *pCUDADeviceBuffer;
/*!
* [out] Information about the captured frame.
*
* Can be NULL.
*/
NVFBC_FRAME_GRAB_INFO *pFrameGrabInfo;
/*!
* [in] Wait timeout in milliseconds.
*
* When capturing frames with the NVFBC_TOCUDA_GRAB_FLAGS_NOFLAGS or
* NVFBC_TOCUDA_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY flags,
* NvFBC will wait for a new frame or mouse move until the below timer
* expires.
*
* When timing out, the last captured frame will be returned. Note that as
* long as the NVFBC_TOCUDA_GRAB_FLAGS_FORCE_REFRESH flag is not set,
* returning an old frame will incur no performance penalty.
*
* NvFBC clients can use the return value of the grab frame operation to
* find out whether a new frame was captured, or the timer expired.
*
* Note that the behavior of blocking calls is to wait for a new frame
* *after* the call has been made. When using timeouts, it is possible
* that NvFBC will return a new frame (e.g., it has never been captured
* before) even though no new frame was generated after the grab call.
*
* For the precise definition of what constitutes a new frame, see
* ::bIsNewFrame.
*
* Set to 0 to disable timeouts.
*/
uint32_t dwTimeoutMs;
} NVFBC_TOCUDA_GRAB_FRAME_PARAMS;
/*!
* NVFBC_TOCUDA_GRAB_FRAME_PARAMS structure version.
*/
#define NVFBC_TOCUDA_GRAB_FRAME_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_TOCUDA_GRAB_FRAME_PARAMS, 2)
/*!
* Defines flags that can be used when capturing to an OpenGL buffer in video memory.
*/
typedef enum {
/*!
* Default, capturing waits for a new frame or mouse move.
*
* The default behavior of blocking grabs is to wait for a new frame until
* after the call was made. But it's possible that there is a frame already
* ready that the client hasn't seen.
* \see NVFBC_TOGL_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY
*/
NVFBC_TOGL_GRAB_FLAGS_NOFLAGS = 0,
/*!
* Capturing does not wait for a new frame nor a mouse move.
*
* It is therefore possible to capture the same frame multiple times.
* When this occurs, the dwCurrentFrame parameter of the
* NVFBC_FRAME_GRAB_INFO structure is not incremented.
*/
NVFBC_TOGL_GRAB_FLAGS_NOWAIT = (1 << 0),
/*!
* Forces the destination buffer to be refreshed even if the frame
* has not changed since previous capture.
*
* By default, if the captured frame is identical to the previous one, NvFBC
* will omit one copy and not update the destination buffer.
*
* Setting that flag will prevent this behavior. This can be useful e.g.,
* if the application has modified the buffer in the meantime.
*/
NVFBC_TOGL_GRAB_FLAGS_FORCE_REFRESH = (1 << 1),
/*!
* Similar to NVFBC_TOGL_GRAB_FLAGS_NOFLAGS, except that the capture will
* not wait if there is already a frame available that the client has
* never seen yet.
*/
NVFBC_TOGL_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY = (1 << 2),
} NVFBC_TOGL_FLAGS;
/*!
* Maximum number of GL textures that can be used to store frames.
*/
#define NVFBC_TOGL_TEXTURES_MAX 2
/*!
* Defines parameters for the ::NvFBCToGLSetUp() API call.
*/
typedef struct _NVFBC_TOGL_SETUP_PARAMS {
  /*!
   * [in] Must be set to NVFBC_TOGL_SETUP_PARAMS_VER
   */
  uint32_t dwVersion;
  /*!
   * [in] Desired buffer format.
   */
  NVFBC_BUFFER_FORMAT eBufferFormat;
  /*!
   * [in] Whether differential maps should be generated.
   */
  NVFBC_BOOL bWithDiffMap;
  /*!
   * [out] Pointer to a pointer to a buffer in system memory.
   *
   * NOTE(review): presumably only populated when bWithDiffMap is set to
   * NVFBC_TRUE, mirroring diffMapSize below — confirm with the NvFBC SDK
   * documentation.
   *
   * \see NVFBC_TOSYS_SETUP_PARAMS::ppDiffMap
   */
  void **ppDiffMap;
  /*!
   * [in] Scaling factor of the differential maps.
   *
   * \see NVFBC_TOSYS_SETUP_PARAMS::dwDiffMapScalingFactor
   */
  uint32_t dwDiffMapScalingFactor;
  /*!
   * [out] List of GL textures that will store the captured frames.
   *
   * This array is 0 terminated. The number of textures varies depending on
   * the capture settings (such as whether diffmaps are enabled).
   *
   * An application wishing to interop with, for example, EncodeAPI will need
   * to register these textures prior to start encoding frames.
   *
   * After each frame capture, the texture holding the current frame will be
   * returned in NVFBC_TOGL_GRAB_FRAME_PARAMS::dwTexture.
   */
  uint32_t dwTextures[NVFBC_TOGL_TEXTURES_MAX];
  /*!
   * [out] GL target to which the texture should be bound.
   */
  uint32_t dwTexTarget;
  /*!
   * [out] GL format of the textures.
   */
  uint32_t dwTexFormat;
  /*!
   * [out] GL type of the textures.
   */
  uint32_t dwTexType;
  /*!
   * [out] Size of the differential map.
   *
   * Only set if bWithDiffMap is set to NVFBC_TRUE.
   */
  NVFBC_SIZE diffMapSize;
} NVFBC_TOGL_SETUP_PARAMS;
/*!
* NVFBC_TOGL_SETUP_PARAMS structure version.
*/
#define NVFBC_TOGL_SETUP_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_TOGL_SETUP_PARAMS, 2)
/*!
* Defines parameters for the ::NvFBCToGLGrabFrame() API call.
*/
typedef struct _NVFBC_TOGL_GRAB_FRAME_PARAMS {
  /*!
   * [in] Must be set to NVFBC_TOGL_GRAB_FRAME_PARAMS_VER.
   */
  uint32_t dwVersion;
  /*!
   * [in] Flags defining the behavior of this frame capture.
   *
   * \see NVFBC_TOGL_FLAGS
   */
  uint32_t dwFlags;
  /*!
   * [out] Index of the texture storing the current frame.
   *
   * This is an index in the NVFBC_TOGL_SETUP_PARAMS::dwTextures array.
   */
  uint32_t dwTextureIndex;
  /*!
   * [out] Information about the captured frame.
   *
   * Can be NULL.
   */
  NVFBC_FRAME_GRAB_INFO *pFrameGrabInfo;
  /*!
   * [in] Wait timeout in milliseconds.
   *
   * When capturing frames with the NVFBC_TOGL_GRAB_FLAGS_NOFLAGS or
   * NVFBC_TOGL_GRAB_FLAGS_NOWAIT_IF_NEW_FRAME_READY flags,
   * NvFBC will wait for a new frame or mouse move until the below timer
   * expires.
   *
   * When timing out, the last captured frame will be returned. Note that as
   * long as the NVFBC_TOGL_GRAB_FLAGS_FORCE_REFRESH flag is not set,
   * returning an old frame will incur no performance penalty.
   *
   * NvFBC clients can use the return value of the grab frame operation to
   * find out whether a new frame was captured, or the timer expired.
   *
   * Note that the behavior of blocking calls is to wait for a new frame
   * *after* the call has been made. When using timeouts, it is possible
   * that NvFBC will return a new frame (e.g., it has never been captured
   * before) even though no new frame was generated after the grab call.
   *
   * For the precise definition of what constitutes a new frame, see
   * ::bIsNewFrame.
   *
   * Set to 0 to disable timeouts.
   */
  uint32_t dwTimeoutMs;
} NVFBC_TOGL_GRAB_FRAME_PARAMS;
/*!
* NVFBC_TOGL_GRAB_FRAME_PARAMS structure version.
*/
#define NVFBC_TOGL_GRAB_FRAME_PARAMS_VER NVFBC_STRUCT_VERSION(NVFBC_TOGL_GRAB_FRAME_PARAMS, 2)
/*! @} FBC_STRUCT */
/*!
* \defgroup FBC_FUNC API Entry Points
*
* Entry points are thread-safe and can be called concurrently.
*
* The locking model includes a global lock that protects session handle
* management (\see NvFBCCreateHandle, \see NvFBCDestroyHandle).
*
* Each NvFBC session uses a local lock to protect other entry points. Note
* that in certain cases, a thread can hold the local lock for an undefined
* amount of time, such as grabbing a frame using a blocking call.
*
* Note that a context is associated with each session. NvFBC clients wishing
* to share a session between different threads are expected to release and
* bind the context appropriately (\see NvFBCBindContext,
* \see NvFBCReleaseContext). This is not required when each thread uses its
* own NvFBC session.
*
* @{
*/
/*!
* Gets the last error message that got recorded for a client.
*
* When NvFBC returns an error, it will save an error message that can be
* queried through this API call. Only the last message is saved.
* The message and the return code should give enough information about
* what went wrong.
*
* \param [in] sessionHandle
* Handle to the NvFBC client.
* \return
* A NULL terminated error message, or an empty string. Its maximum length
* is NVFBC_ERROR_STR_LEN.
*/
const char *NVFBCAPI
NvFBCGetLastErrorStr(const NVFBC_SESSION_HANDLE sessionHandle);
/*!
* \brief Allocates a new handle for an NvFBC client.
*
* This function allocates a session handle used to identify an FBC client.
*
* This function implicitly calls NvFBCBindContext().
*
* \param [out] pSessionHandle
* Pointer that will hold the allocated session handle.
* \param [in] pParams
* ::NVFBC_CREATE_HANDLE_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_PTR \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_INTERNAL \n
* ::NVFBC_ERR_OUT_OF_MEMORY \n
* ::NVFBC_ERR_MAX_CLIENTS \n
* ::NVFBC_ERR_X \n
* ::NVFBC_ERR_GLX \n
* ::NVFBC_ERR_GL
*
*/
NVFBCSTATUS NVFBCAPI
NvFBCCreateHandle(NVFBC_SESSION_HANDLE *pSessionHandle, NVFBC_CREATE_HANDLE_PARAMS *pParams);
/*!
* \brief Destroys the handle of an NvFBC client.
*
* This function uninitializes an FBC client.
*
* This function implicitly calls NvFBCReleaseContext().
*
* After this function returns, it is not possible to use this session handle
* for any further API call.
*
* \param [in] sessionHandle
* FBC session handle.
* \param [in] pParams
* ::NVFBC_DESTROY_HANDLE_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_HANDLE \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_BAD_REQUEST \n
* ::NVFBC_ERR_INTERNAL \n
* ::NVFBC_ERR_CONTEXT \n
* ::NVFBC_ERR_X
*/
NVFBCSTATUS NVFBCAPI
NvFBCDestroyHandle(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_DESTROY_HANDLE_PARAMS *pParams);
/*!
* \brief Gets the current status of the display driver.
*
* This function queries the display driver for various information.
*
* \param [in] sessionHandle
* FBC session handle.
* \param [in] pParams
* ::NVFBC_GET_STATUS_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_HANDLE \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_INTERNAL \n
* ::NVFBC_ERR_X
*/
NVFBCSTATUS NVFBCAPI
NvFBCGetStatus(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_GET_STATUS_PARAMS *pParams);
/*!
 * \brief Binds the FBC context to the calling thread.
 *
 * The NvFBC library internally relies on objects that must be bound to a
 * thread. Such objects are OpenGL contexts and CUDA contexts.
 *
 * This function binds these objects to the calling thread.
 *
 * The FBC context must be bound to the calling thread for most NvFBC entry
 * points, otherwise ::NVFBC_ERR_CONTEXT is returned.
 *
 * If the FBC context is already bound to a different thread,
 * ::NVFBC_ERR_CONTEXT is returned. The other thread must release the context
 * first by calling the ReleaseContext() entry point.
 *
 * If the FBC context is already bound to the current thread, this function has
 * no effects.
 *
 * \param [in] sessionHandle
 * FBC session handle.
 * \param [in] pParams
 * ::NVFBC_BIND_CONTEXT_PARAMS
 *
 * \return
 * ::NVFBC_SUCCESS \n
 * ::NVFBC_ERR_INVALID_HANDLE \n
 * ::NVFBC_ERR_API_VERSION \n
 * ::NVFBC_ERR_BAD_REQUEST \n
 * ::NVFBC_ERR_CONTEXT \n
 * ::NVFBC_ERR_INTERNAL \n
 * ::NVFBC_ERR_X
 */
NVFBCSTATUS NVFBCAPI
NvFBCBindContext(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_BIND_CONTEXT_PARAMS *pParams);
/*!
 * \brief Releases the FBC context from the calling thread.
 *
 * If the FBC context is bound to a different thread, ::NVFBC_ERR_CONTEXT is
 * returned.
 *
 * If the FBC context is already released, this function has no effects.
 *
 * \param [in] sessionHandle
 * FBC session handle.
 * \param [in] pParams
 * ::NVFBC_RELEASE_CONTEXT_PARAMS
 *
 * \return
 * ::NVFBC_SUCCESS \n
 * ::NVFBC_ERR_INVALID_HANDLE \n
 * ::NVFBC_ERR_API_VERSION \n
 * ::NVFBC_ERR_BAD_REQUEST \n
 * ::NVFBC_ERR_CONTEXT \n
 * ::NVFBC_ERR_INTERNAL \n
 * ::NVFBC_ERR_X
 */
NVFBCSTATUS NVFBCAPI
NvFBCReleaseContext(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_RELEASE_CONTEXT_PARAMS *pParams);
/*!
* \brief Creates a capture session for an FBC client.
*
* This function starts a capture session of the desired type (system memory,
* video memory with CUDA interop, or H.264 compressed frames in system memory).
*
* Not all types are supported on all systems. Also, it is possible to use
* NvFBC without having the CUDA library. In this case, requesting a capture
* session of the concerned type will return an error.
*
* After this function returns, the display driver will start generating frames
* that can be captured using the corresponding API call.
*
* \param [in] sessionHandle
* FBC session handle.
* \param [in] pParams
* ::NVFBC_CREATE_CAPTURE_SESSION_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_HANDLE \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_BAD_REQUEST \n
* ::NVFBC_ERR_CONTEXT \n
* ::NVFBC_ERR_INVALID_PARAM \n
* ::NVFBC_ERR_OUT_OF_MEMORY \n
* ::NVFBC_ERR_X \n
* ::NVFBC_ERR_GLX \n
* ::NVFBC_ERR_GL \n
* ::NVFBC_ERR_CUDA \n
* ::NVFBC_ERR_MUST_RECREATE \n
* ::NVFBC_ERR_INTERNAL
*/
NVFBCSTATUS NVFBCAPI
NvFBCCreateCaptureSession(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_CREATE_CAPTURE_SESSION_PARAMS *pParams);
/*!
* \brief Destroys a capture session for an FBC client.
*
* This function stops a capture session and frees allocated objects.
*
* After this function returns, it is possible to create another capture
* session using the corresponding API call.
*
* \param [in] sessionHandle
* FBC session handle.
* \param [in] pParams
* ::NVFBC_DESTROY_CAPTURE_SESSION_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_HANDLE \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_BAD_REQUEST \n
* ::NVFBC_ERR_CONTEXT \n
* ::NVFBC_ERR_INTERNAL \n
* ::NVFBC_ERR_X
*/
NVFBCSTATUS NVFBCAPI
NvFBCDestroyCaptureSession(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_DESTROY_CAPTURE_SESSION_PARAMS *pParams);
/*!
* \brief Sets up a capture to system memory session.
*
* This function configures how the capture to system memory should behave. It
* can be called anytime and several times after the capture session has been
* created. However, it must be called at least once prior to start capturing
* frames.
*
* This function allocates the buffer that will contain the captured frame.
* The application does not need to free this buffer. The size of this buffer
* is returned in the ::NVFBC_FRAME_GRAB_INFO structure.
*
* \param [in] sessionHandle
* FBC session handle.
* \param [in] pParams
* ::NVFBC_TOSYS_SETUP_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_HANDLE \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_BAD_REQUEST \n
* ::NVFBC_ERR_INTERNAL \n
* ::NVFBC_ERR_CONTEXT \n
* ::NVFBC_ERR_UNSUPPORTED \n
* ::NVFBC_ERR_INVALID_PTR \n
* ::NVFBC_ERR_INVALID_PARAM \n
* ::NVFBC_ERR_OUT_OF_MEMORY \n
* ::NVFBC_ERR_X
*/
NVFBCSTATUS NVFBCAPI
NvFBCToSysSetUp(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOSYS_SETUP_PARAMS *pParams);
/*!
* \brief Captures a frame to a buffer in system memory.
*
* This function triggers a frame capture to a buffer in system memory that was
* registered with the ToSysSetUp() API call.
*
* Note that it is possible that the resolution of the desktop changes while
* capturing frames. This should be transparent for the application.
*
* When the resolution changes, the capture session is recreated using the same
* parameters, and necessary buffers are re-allocated. The frame counter is not
* reset.
*
* An application can detect that the resolution changed by comparing the
* dwByteSize member of the ::NVFBC_FRAME_GRAB_INFO against a previous
* frame and/or dwWidth and dwHeight.
*
* During a change of resolution the capture is paused even in asynchronous
* mode.
*
* \param [in] sessionHandle
* FBC session handle.
* \param [in] pParams
* ::NVFBC_TOSYS_GRAB_FRAME_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_HANDLE \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_BAD_REQUEST \n
* ::NVFBC_ERR_CONTEXT \n
* ::NVFBC_ERR_INVALID_PTR \n
* ::NVFBC_ERR_INTERNAL \n
* ::NVFBC_ERR_X \n
* ::NVFBC_ERR_MUST_RECREATE \n
* \see NvFBCCreateCaptureSession \n
* \see NvFBCToSysSetUp
*/
NVFBCSTATUS NVFBCAPI
NvFBCToSysGrabFrame(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOSYS_GRAB_FRAME_PARAMS *pParams);
/*!
* \brief Sets up a capture to video memory session.
*
* This function configures how the capture to video memory with CUDA interop
* should behave. It can be called anytime and several times after the capture
* session has been created. However, it must be called at least once prior
* to start capturing frames.
*
* \param [in] sessionHandle
* FBC session handle.
*
* \param [in] pParams
* ::NVFBC_TOCUDA_SETUP_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_HANDLE \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_BAD_REQUEST \n
* ::NVFBC_ERR_INTERNAL \n
* ::NVFBC_ERR_CONTEXT \n
* ::NVFBC_ERR_UNSUPPORTED \n
* ::NVFBC_ERR_GL \n
* ::NVFBC_ERR_X
*/
NVFBCSTATUS NVFBCAPI
NvFBCToCudaSetUp(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOCUDA_SETUP_PARAMS *pParams);
/*!
* \brief Captures a frame to a CUDA device in video memory.
*
* This function triggers a frame capture to a CUDA device in video memory.
*
* Note about changes of resolution: \see NvFBCToSysGrabFrame
*
* \param [in] sessionHandle
* FBC session handle.
*
* \param [in] pParams
* ::NVFBC_TOCUDA_GRAB_FRAME_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_HANDLE \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_BAD_REQUEST \n
* ::NVFBC_ERR_CONTEXT \n
* ::NVFBC_ERR_INVALID_PTR \n
* ::NVFBC_ERR_CUDA \n
* ::NVFBC_ERR_INTERNAL \n
* ::NVFBC_ERR_X \n
* ::NVFBC_ERR_MUST_RECREATE \n
* \see NvFBCCreateCaptureSession \n
* \see NvFBCToCudaSetUp
*/
NVFBCSTATUS NVFBCAPI
NvFBCToCudaGrabFrame(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOCUDA_GRAB_FRAME_PARAMS *pParams);
/*!
* \brief Sets up a capture to OpenGL buffer in video memory session.
*
* This function configures how the capture to video memory should behave.
* It can be called anytime and several times after the capture session has been
* created. However, it must be called at least once prior to start capturing
* frames.
*
* \param [in] sessionHandle
* FBC session handle.
*
* \param [in] pParams
* ::NVFBC_TOGL_SETUP_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_HANDLE \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_BAD_REQUEST \n
* ::NVFBC_ERR_INTERNAL \n
* ::NVFBC_ERR_CONTEXT \n
* ::NVFBC_ERR_UNSUPPORTED \n
* ::NVFBC_ERR_GL \n
* ::NVFBC_ERR_X
*/
NVFBCSTATUS NVFBCAPI
NvFBCToGLSetUp(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOGL_SETUP_PARAMS *pParams);
/*!
* \brief Captures a frame to an OpenGL buffer in video memory.
*
* This function triggers a frame capture to a selected resource in video memory.
*
* Note about changes of resolution: \see NvFBCToSysGrabFrame
*
* \param [in] sessionHandle
* FBC session handle.
*
* \param [in] pParams
* ::NVFBC_TOGL_GRAB_FRAME_PARAMS
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_HANDLE \n
* ::NVFBC_ERR_API_VERSION \n
* ::NVFBC_ERR_BAD_REQUEST \n
* ::NVFBC_ERR_CONTEXT \n
* ::NVFBC_ERR_INVALID_PTR \n
* ::NVFBC_ERR_INTERNAL \n
* ::NVFBC_ERR_X \n
* ::NVFBC_ERR_MUST_RECREATE \n
* \see NvFBCCreateCaptureSession \n
* \see NvFBCToCudaSetUp
*/
NVFBCSTATUS NVFBCAPI
NvFBCToGLGrabFrame(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOGL_GRAB_FRAME_PARAMS *pParams);
/*!
* \cond FBC_PFN
*
* Defines API function pointers
*/
typedef const char *(NVFBCAPI *PNVFBCGETLASTERRORSTR)(const NVFBC_SESSION_HANDLE sessionHandle);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCCREATEHANDLE)(NVFBC_SESSION_HANDLE *pSessionHandle, NVFBC_CREATE_HANDLE_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCDESTROYHANDLE)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_DESTROY_HANDLE_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCBINDCONTEXT)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_BIND_CONTEXT_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCRELEASECONTEXT)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_RELEASE_CONTEXT_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCGETSTATUS)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_GET_STATUS_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCCREATECAPTURESESSION)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_CREATE_CAPTURE_SESSION_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCDESTROYCAPTURESESSION)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_DESTROY_CAPTURE_SESSION_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCTOSYSSETUP)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOSYS_SETUP_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCTOSYSGRABFRAME)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOSYS_GRAB_FRAME_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCTOCUDASETUP)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOCUDA_SETUP_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCTOCUDAGRABFRAME)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOCUDA_GRAB_FRAME_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCTOGLSETUP)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOGL_SETUP_PARAMS *pParams);
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCTOGLGRABFRAME)(const NVFBC_SESSION_HANDLE sessionHandle, NVFBC_TOGL_GRAB_FRAME_PARAMS *pParams);
/// \endcond
/*! @} FBC_FUNC */
/*!
* \ingroup FBC_STRUCT
*
* Structure populated with API function pointers.
*/
typedef struct
{
  uint32_t dwVersion; //!< [in] Must be set to NVFBC_VERSION.
  PNVFBCGETLASTERRORSTR nvFBCGetLastErrorStr; //!< [out] Pointer to ::NvFBCGetLastErrorStr().
  PNVFBCCREATEHANDLE nvFBCCreateHandle; //!< [out] Pointer to ::NvFBCCreateHandle().
  PNVFBCDESTROYHANDLE nvFBCDestroyHandle; //!< [out] Pointer to ::NvFBCDestroyHandle().
  PNVFBCGETSTATUS nvFBCGetStatus; //!< [out] Pointer to ::NvFBCGetStatus().
  PNVFBCCREATECAPTURESESSION nvFBCCreateCaptureSession; //!< [out] Pointer to ::NvFBCCreateCaptureSession().
  PNVFBCDESTROYCAPTURESESSION nvFBCDestroyCaptureSession; //!< [out] Pointer to ::NvFBCDestroyCaptureSession().
  PNVFBCTOSYSSETUP nvFBCToSysSetUp; //!< [out] Pointer to ::NvFBCToSysSetUp().
  PNVFBCTOSYSGRABFRAME nvFBCToSysGrabFrame; //!< [out] Pointer to ::NvFBCToSysGrabFrame().
  PNVFBCTOCUDASETUP nvFBCToCudaSetUp; //!< [out] Pointer to ::NvFBCToCudaSetUp().
  PNVFBCTOCUDAGRABFRAME nvFBCToCudaGrabFrame; //!< [out] Pointer to ::NvFBCToCudaGrabFrame().
  void *pad1; //!< [out] Retired. Do not use.
  void *pad2; //!< [out] Retired. Do not use.
  void *pad3; //!< [out] Retired. Do not use.
  PNVFBCBINDCONTEXT nvFBCBindContext; //!< [out] Pointer to ::NvFBCBindContext().
  PNVFBCRELEASECONTEXT nvFBCReleaseContext; //!< [out] Pointer to ::NvFBCReleaseContext().
  void *pad4; //!< [out] Retired. Do not use.
  void *pad5; //!< [out] Retired. Do not use.
  void *pad6; //!< [out] Retired. Do not use.
  void *pad7; //!< [out] Retired. Do not use.
  PNVFBCTOGLSETUP nvFBCToGLSetUp; //!< [out] Pointer to ::NvFBCToGLSetUp().
  PNVFBCTOGLGRABFRAME nvFBCToGLGrabFrame; //!< [out] Pointer to ::NvFBCToGLGrabFrame().
} NVFBC_API_FUNCTION_LIST;
/*!
* \ingroup FBC_FUNC
*
* \brief Entry Points to the NvFBC interface.
*
* Creates an instance of the NvFBC interface, and populates the
* pFunctionList with function pointers to the API routines implemented by
* the NvFBC interface.
*
* \param [out] pFunctionList
*
* \return
* ::NVFBC_SUCCESS \n
* ::NVFBC_ERR_INVALID_PTR \n
* ::NVFBC_ERR_API_VERSION
*/
NVFBCSTATUS NVFBCAPI
NvFBCCreateInstance(NVFBC_API_FUNCTION_LIST *pFunctionList);
/*!
* \ingroup FBC_FUNC
*
* Defines function pointer for the ::NvFBCCreateInstance() API call.
*/
typedef NVFBCSTATUS(NVFBCAPI *PNVFBCCREATEINSTANCE)(NVFBC_API_FUNCTION_LIST *pFunctionList);
#ifdef __cplusplus
}
#endif
#endif // _NVFBC_H_
| 65,817
|
C++
|
.h
| 1,914
| 31.584117
| 148
| 0.722458
|
LizardByte/Sunshine
| 18,156
| 876
| 103
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.