code
stringlengths 0
56.1M
| repo_name
stringlengths 3
57
| path
stringlengths 2
176
| language
stringclasses 672
values | license
stringclasses 8
values | size
int64 0
56.8M
|
|---|---|---|---|---|---|
#include "indexed_model.hpp"
#include <glm/glm.hpp>
using namespace ZN;
// Appends one vertex position.
void IndexedModel::add_position(glm::vec3 pos) {
	m_positions.push_back(pos);
}
// Appends one vertex normal.
void IndexedModel::add_normal(glm::vec3 norm) {
	m_normals.push_back(norm);
}
// Appends one vertex tangent.
void IndexedModel::add_tangent(glm::vec3 tan) {
	m_tangents.push_back(tan);
}
// Appends one texture coordinate.
void IndexedModel::add_tex_coord(glm::vec2 uv) {
	m_texCoords.push_back(uv);
}
// Appends one triangle-list index.
void IndexedModel::add_index(uint32_t i0) {
	m_indices.push_back(i0);
}
// Recomputes per-vertex tangents from triangle edges and UV deltas:
// accumulates each face's tangent onto its three vertices, then normalizes.
// Assumes m_texCoords has one entry per position — TODO confirm at call sites.
void IndexedModel::calc_tangents() {
	// Fix: assign() instead of resize() — resize() kept stale sums from a
	// previous call, corrupting the result when invoked more than once.
	m_tangents.assign(m_positions.size(), glm::vec3{});
	// Fix: "i + 2 <" guards a trailing incomplete index triple (previously
	// read past the end of m_indices).
	for (size_t i = 0; i + 2 < m_indices.size(); i += 3) {
		auto i0 = m_indices[i];
		auto i1 = m_indices[i + 1];
		auto i2 = m_indices[i + 2];
		auto edge1 = m_positions[i1] - m_positions[i0];
		auto edge2 = m_positions[i2] - m_positions[i0];
		auto uv1 = m_texCoords[i1] - m_texCoords[i0];
		auto uv2 = m_texCoords[i2] - m_texCoords[i0];
		// Determinant of the UV edge matrix; zero means degenerate UVs.
		auto dividend = uv1.x * uv2.y - uv2.x * uv1.y;
		auto f = dividend == 0.f ? 0.f : 1.f / dividend;
		auto tangent = (edge1 * uv2.y - edge2 * uv1.y) * f;
		m_tangents[i0] += tangent;
		m_tangents[i1] += tangent;
		m_tangents[i2] += tangent;
	}
	for (auto& tangent : m_tangents) {
		// Fix: glm::normalize on a zero vector yields NaN; leave degenerate
		// tangents (unreferenced vertices, zero-area UVs) as zero instead.
		if (glm::dot(tangent, tangent) > 0.f) {
			tangent = glm::normalize(tangent);
		}
	}
}
// --- Raw data accessors -------------------------------------------------
// All pointers are borrowed views into the model's internal vectors and are
// invalidated by any subsequent add_*() or calc_tangents() call.
const glm::vec3* IndexedModel::get_positions() const {
return m_positions.data();
}
const glm::vec3* IndexedModel::get_normals() const {
return m_normals.data();
}
const glm::vec3* IndexedModel::get_tangents() const {
return m_tangents.data();
}
const glm::vec2* IndexedModel::get_tex_coords() const {
return m_texCoords.data();
}
const uint32_t* IndexedModel::get_indices() const {
return m_indices.data();
}
// Vertex count is tracked via positions; normals/uvs are presumably added in
// lockstep by the loader — confirm for other callers.
size_t IndexedModel::get_vertex_count() const {
return m_positions.size();
}
size_t IndexedModel::get_index_count() const {
return m_indices.size();
}
|
whupdup/frame
|
real/asset/indexed_model.cpp
|
C++
|
gpl-3.0
| 1,842
|
#pragma once
#include <glm/vec2.hpp>
#include <glm/vec3.hpp>
#include <cstdint>
#include <vector>
namespace ZN {
// CPU-side mesh container: parallel per-vertex arrays (position / normal /
// tangent / texcoord) plus a 32-bit triangle index list. Move-only.
class IndexedModel final {
public:
explicit IndexedModel() = default;
IndexedModel(IndexedModel&&) = default;
IndexedModel& operator=(IndexedModel&&) = default;
IndexedModel(const IndexedModel&) = delete;
void operator=(const IndexedModel&) = delete;
// Appenders: each pushes one element onto the corresponding array.
void add_position(glm::vec3);
void add_normal(glm::vec3);
void add_tangent(glm::vec3);
void add_tex_coord(glm::vec2);
void add_index(uint32_t);
// Rebuilds the tangent array from positions, texcoords and indices.
void calc_tangents();
// Borrowed pointers into the internal arrays (invalidated by add_*()).
const glm::vec3* get_positions() const;
const glm::vec3* get_normals() const;
const glm::vec3* get_tangents() const;
const glm::vec2* get_tex_coords() const;
const uint32_t* get_indices() const;
size_t get_vertex_count() const;
size_t get_index_count() const;
private:
std::vector<glm::vec3> m_positions;
std::vector<glm::vec3> m_normals;
std::vector<glm::vec3> m_tangents;
std::vector<glm::vec2> m_texCoords;
std::vector<uint32_t> m_indices;
};
}
|
whupdup/frame
|
real/asset/indexed_model.hpp
|
C++
|
gpl-3.0
| 1,042
|
#include "obj_loader.hpp"
#include <asset/indexed_model.hpp>
#include <file/file_system.hpp>
#include <charconv>
#include <unordered_map>
#include <cctype>
#include <cstdio>
using namespace ZN;
namespace ZN::OBJ {
// One OBJ face-vertex reference: position / texcoord / normal indices
// (1-based as written in the file; 0 means "component absent").
struct VertexIndices {
	uint32_t values[3];

	bool operator==(const VertexIndices& other) const {
		for (size_t i = 0; i < 3; ++i) {
			if (values[i] != other.values[i]) {
				return false;
			}
		}
		return true;
	}
};
// A triangulated face: exactly three vertex references.
struct Face {
VertexIndices indices[3];
};
// Simple x31 polynomial hash over the three index slots, for use as the
// unordered_map hasher in emit_mesh().
struct VertexIndicesHash {
	size_t operator()(const VertexIndices& vi) const {
		size_t hash = 17ull;
		hash = 31ull * hash + vi.values[0];
		hash = 31ull * hash + vi.values[1];
		hash = 31ull * hash + vi.values[2];
		return hash;
	}
};
// Single-pass Wavefront OBJ parser. Accumulates positions/normals/uvs and
// faces, and emits one IndexedModel per "o" object via the callback.
class Parser final {
public:
// `data` must stay alive for the lifetime of the parser (views only).
explicit Parser(const char* data, size_t size, OBJ::LoadCallback* callback);
void parse();
private:
// Cursor and one-past-end of the input buffer.
const char* m_curr;
const char* m_end;
OBJ::LoadCallback* m_callback;
// Span of the current object's name inside the input buffer.
// NOTE(review): not initialized by the constructor — confirm emit_mesh()
// is never reached before parse_object_name() sets these.
const char* m_nameStart;
const char* m_nameEnd;
// Attribute pools; OBJ face indices are global across the whole file.
std::vector<glm::vec3> m_positions;
std::vector<glm::vec3> m_normals;
std::vector<glm::vec2> m_texCoords;
std::vector<Face> m_faces;
// Sticky error flag; has_next() returns false once set.
bool m_error;
void emit_mesh();
void parse_face();
void parse_face_vertex(uint32_t* vertexIndices);
void parse_v_line();
void parse_object_name();
void parse_comment();
template <typename T>
T parse_vector();
uint32_t parse_uint();
float parse_float();
void consume_whitespace();
char get();
char peek() const;
bool has_next() const;
void raise_error();
static bool is_newline_char(char);
static bool is_float_char(char);
};
}
// Loads `fileName` through the global file system and parses it as OBJ.
// Silently no-ops when either argument is null. An empty/missing file yields
// no callback invocations (the parser sees an empty range).
void OBJ::load(const char* fileName, OBJ::LoadCallback* callback) {
if (!fileName || !callback) {
return;
}
auto data = g_fileSystem->file_read_bytes(fileName);
Parser parser(data.data(), data.size(), callback);
parser.parse();
}
// Sets up the parse cursor over [data, data + size).
OBJ::Parser::Parser(const char* data, size_t size, OBJ::LoadCallback* callback)
	: m_curr(data)
	, m_end(data + size)
	, m_callback(callback)
	// Fix: m_nameStart/m_nameEnd were left uninitialized, and emit_mesh()
	// reads them even when the file contains no "o <name>" line (UB).
	// Pointing both at the buffer start makes the default name empty.
	, m_nameStart(data)
	, m_nameEnd(data)
	, m_error(false) {}
// Main loop: dispatch on the first character of each directive until the
// input (or the sticky error flag) runs out, then flush the final mesh.
void OBJ::Parser::parse() {
	while (has_next()) {
		const char directive = get();
		if (directive == 'f') {
			parse_face();
		}
		else if (directive == 'o') {
			parse_object_name();
		}
		else if (directive == 'v') {
			parse_v_line();
		}
		else if (directive == 's' || directive == '#') {
			// Smoothing groups and comments are skipped wholesale.
			parse_comment();
		}
	}
	emit_mesh();
}
void OBJ::Parser::emit_mesh() {
if (m_faces.empty()) {
return;
}
IndexedModel model{};
std::unordered_map<VertexIndices, uint32_t, VertexIndicesHash> resultIndices;
uint32_t positionCount = 0;
for (auto& face : m_faces) {
for (auto& vi : face.indices) {
auto& currPos = m_positions[vi.values[0] - 1];
glm::vec3 currNormal{};
glm::vec2 currTexCoord{};
if (vi.values[1]) {
currTexCoord = m_texCoords[vi.values[1] - 1];
currTexCoord.y = 1.f - currTexCoord.y;
}
if (vi.values[2]) {
currNormal = m_normals[vi.values[2] - 1];
}
uint32_t modelVertexIndex = {};
if (auto it = resultIndices.find(vi); it != resultIndices.end()) {
modelVertexIndex = it->second;
}
else {
modelVertexIndex = positionCount++;
resultIndices.emplace(std::make_pair(vi, modelVertexIndex));
model.add_position(currPos);
model.add_tex_coord(std::move(currTexCoord));
model.add_normal(std::move(currNormal));
}
model.add_index(modelVertexIndex);
}
}
model.calc_tangents();
m_callback(std::string_view(m_nameStart, m_nameEnd), std::move(model));
}
void OBJ::Parser::parse_face() {
Face face{};
uint32_t vertexIndex = 0;
while (has_next()) {
char c = peek();
if (is_newline_char(c)) {
get();
break;
}
else if (std::isspace(c)) {
consume_whitespace();
}
else {
parse_face_vertex(face.indices[vertexIndex].values);
++vertexIndex;
}
}
m_faces.emplace_back(std::move(face));
}
// Parses one "p[/t[/n]]" token into vertexIndices[0..2].
void OBJ::Parser::parse_face_vertex(uint32_t* vertexIndices) {
	uint32_t slot = 0;
	while (has_next()) {
		char c = peek();
		// Fix: isdigit/isspace require an unsigned-char value; a plain
		// (possibly negative) char is UB.
		if (std::isdigit(static_cast<unsigned char>(c))) {
			vertexIndices[slot] = parse_uint();
		}
		else if (c == '/') {
			++m_curr;
			// Fix: clamp at the last slot so malformed input like "1/2/3/4"
			// cannot advance the write cursor past the 3-element array.
			if (slot < 2) {
				++slot;
			}
		}
		else {
			break;
		}
	}
}
// Handles a line starting with 'v' (already consumed): "v " position,
// "vn" normal, "vt" texcoord; anything else is skipped as a comment.
void OBJ::Parser::parse_v_line() {
	if (!has_next()) {
		raise_error();
		return;
	}
	const char kind = get();
	if (kind == 'n') {
		m_normals.emplace_back(parse_vector<glm::vec3>());
	}
	else if (kind == 't') {
		m_texCoords.emplace_back(parse_vector<glm::vec2>());
	}
	else if (kind == ' ') {
		m_positions.emplace_back(parse_vector<glm::vec3>());
	}
	else {
		parse_comment();
	}
}
// Starts a new object: flushes the mesh accumulated so far, then records the
// name span (a view into the input buffer — no copy is made).
void OBJ::Parser::parse_object_name() {
emit_mesh();
consume_whitespace();
m_nameStart = m_curr;
while (has_next() && !is_newline_char(*m_curr)) {
++m_curr;
}
m_nameEnd = m_curr;
}
// Skips everything up to (but not including) the line terminator.
void OBJ::Parser::parse_comment() {
	for (; has_next(); ++m_curr) {
		if (is_newline_char(*m_curr)) {
			break;
		}
	}
}
template <typename T>
T OBJ::Parser::parse_vector() {
T vec{};
uint32_t componentIndex = 0;
while (has_next()) {
char c = peek();
if (is_newline_char(c)) {
get();
break;
}
else if (std::isspace(c)) {
consume_whitespace();
}
else {
vec[componentIndex] = parse_float();
++componentIndex;
}
}
return vec;
}
// Consumes a run of decimal digits and converts it with std::from_chars
// (locale-free, non-throwing). Returns 0 when no digits were consumed.
uint32_t OBJ::Parser::parse_uint() {
	auto* start = m_curr;
	// Fix: isdigit requires an unsigned-char value; plain char may be
	// negative for bytes >= 0x80, which is UB.
	while (has_next() && std::isdigit(static_cast<unsigned char>(*m_curr))) {
		++m_curr;
	}
	uint32_t result{};
	std::from_chars(start, m_curr, result);
	return result;
}
// Consumes a float-literal token (as delimited by is_float_char) and converts
// it with std::from_chars. Returns 0.f when nothing parseable was consumed.
// NOTE(review): floating-point from_chars needs a reasonably recent stdlib
// (GCC >= 11 / MSVC 2017+); confirm against the project's toolchain baseline.
float OBJ::Parser::parse_float() {
auto* start = m_curr;
while (has_next() && is_float_char(*m_curr)) {
++m_curr;
}
float result{};
std::from_chars(start, m_curr, result);
return result;
}
// Advances the cursor past any whitespace (including newlines).
void OBJ::Parser::consume_whitespace() {
	// Fix: isspace must be fed an unsigned-char value; a negative plain
	// char (any byte >= 0x80 on signed-char platforms) is UB.
	while (has_next() && std::isspace(static_cast<unsigned char>(peek()))) {
		++m_curr;
	}
}
// Returns the current character and advances. Caller must check has_next().
char OBJ::Parser::get() {
char res = *m_curr;
++m_curr;
return res;
}
// Returns the current character without advancing. Caller checks has_next().
char OBJ::Parser::peek() const {
return *m_curr;
}
// False once the input is exhausted or the sticky error flag is set.
bool OBJ::Parser::has_next() const {
return !m_error && m_curr != m_end;
}
// Sets the sticky error flag, which terminates the parse loop.
void OBJ::Parser::raise_error() {
m_error = true;
}
// True for CR or LF (handles both Unix and Windows line endings).
bool OBJ::Parser::is_newline_char(char c) {
return c == '\r' || c == '\n';
}
// Characters that may appear in an OBJ float literal.
// Fix: include the exponent characters so scientific notation (e.g. "1e-05",
// commonly emitted by exporters) is consumed in full instead of being
// truncated at the 'e'; std::from_chars accepts the exponent form.
bool OBJ::Parser::is_float_char(char c) {
	return c == '-' || c == '+' || c == '.' || c == 'e' || c == 'E'
		|| std::isdigit(static_cast<unsigned char>(c));
}
|
whupdup/frame
|
real/asset/obj_loader.cpp
|
C++
|
gpl-3.0
| 6,006
|
#pragma once
#include <string_view>
namespace ZN { class IndexedModel; }
namespace ZN::OBJ {
// Invoked once per "o" object (and once at end of file) with the object's
// name and the finished mesh. Plain function type — no state capture.
using LoadCallback = void(const std::string_view&, IndexedModel&&);
// Reads `fileName` through the global file system and parses it as OBJ.
// No-ops when either argument is null.
void load(const char* fileName, LoadCallback* callback);
}
|
whupdup/frame
|
real/asset/obj_loader.hpp
|
C++
|
gpl-3.0
| 227
|
# Core library sources.
target_sources(LibCommon PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/logging.cpp"
#"${CMAKE_CURRENT_SOURCE_DIR}/profiler.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/system_info.cpp"
)
# Select the threading backend from FindThreads' result variables.
if (CMAKE_USE_WIN32_THREADS_INIT)
target_sources(LibCommon PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/threading_win32.cpp"
)
elseif (CMAKE_USE_PTHREADS_INIT)
target_sources(LibCommon PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/threading_pthread.cpp"
)
endif()
|
whupdup/frame
|
real/core/CMakeLists.txt
|
Text
|
gpl-3.0
| 427
|
#pragma once
#include <initializer_list>
namespace ZN {
// Returns the largest element of a non-empty initializer list; behavior is
// undefined for an empty list (matching std::max's precondition). When
// several elements compare equal to the maximum, the earliest one wins.
template <typename T>
constexpr T max(std::initializer_list<T> iList) {
	auto best = iList.begin();
	auto it = best;
	for (++it; it != iList.end(); ++it) {
		if (*it > *best) {
			best = it;
		}
	}
	return *best;
}
}
|
whupdup/frame
|
real/core/algorithms.hpp
|
C++
|
gpl-3.0
| 303
|
#pragma once
#include <core/common.hpp>
namespace ZN {
// https://awesomekling.github.io/Serenity-C++-patterns-The-Badge/
// Access-control token: only T can construct a Badge<T>, so a function whose
// signature requires Badge<T> can only ever be called by T (see the linked
// SerenityOS pattern write-up above).
template <typename T>
class Badge {
public:
using type = T;
private:
friend T;
constexpr Badge() = default;
NULL_COPY_AND_ASSIGN(Badge);
};
}
|
whupdup/frame
|
real/core/badge.hpp
|
C++
|
gpl-3.0
| 281
|
#pragma once
#include <cstddef>
namespace ZN {
// Abstract byte sink. write() returns the number of bytes actually written.
class OutputStream {
public:
virtual size_t write(const void* buffer, size_t size) = 0;
virtual ~OutputStream() = default;
};
// Abstract byte source over an in-memory buffer (get_buffer()/get_size()
// expose the backing storage; has_next() reports remaining input).
class InputStream {
public:
// Returns the next byte, int-widened — presumably negative on EOF, in the
// style of fgetc; confirm against the concrete implementations.
virtual int get() = 0;
virtual size_t read(void* buffer, size_t size) = 0;
virtual const void* get_buffer() const = 0;
virtual size_t get_size() const = 0;
virtual bool has_next() const = 0;
virtual ~InputStream() = default;
};
}
|
whupdup/frame
|
real/core/base_stream.hpp
|
C++
|
gpl-3.0
| 458
|
#pragma once
#include <cstdint>
#include <cstddef>
namespace ZN {
// Returned by visitor callbacks to continue or stop an iteration early.
enum class IterationDecision {
CONTINUE,
BREAK
};
}
// --- Operating-system detection -----------------------------------------
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(_WIN64) || defined(WIN64)
#define OPERATING_SYSTEM_WINDOWS
#elif defined(__linux__)
#define OPERATING_SYSTEM_LINUX
#elif defined(__APPLE__)
#define OPERATING_SYSTEM_MACOS
#else
#define OPERATING_SYSTEM_OTHER
#endif
// --- Compiler detection (clang first: it also defines __GNUC__) ---------
#if defined(__clang__)
#define COMPILER_CLANG
#elif defined(__GNUC__) || defined(__GNUG__)
#define COMPILER_GCC
#elif defined(_MSC_VER)
#define COMPILER_MSVC
#else
#define COMPILER_OTHER
#endif
// Portable inlining-control attributes.
#ifdef COMPILER_MSVC
#define ZN_FORCEINLINE __forceinline
#define ZN_NEVERINLINE __declspec(noinline)
#elif defined(COMPILER_CLANG) || defined(COMPILER_GCC)
#define ZN_FORCEINLINE inline __attribute__((always_inline))
#define ZN_NEVERINLINE __attribute__((noinline))
#else
#define ZN_FORCEINLINE inline
#define ZN_NEVERINLINE
#endif
// Two-level token paste so macro arguments (e.g. __LINE__) expand first.
#define CONCAT_LABEL_(prefix, suffix) prefix##suffix
#define CONCAT_LABEL(prefix, suffix) CONCAT_LABEL_(prefix, suffix)
// Fix: this previously expanded to CONCAT(...), a macro that is not defined
// anywhere in this header, so every use failed to compile. Route it through
// CONCAT_LABEL instead.
#define MAKE_UNIQUE_VARIABLE_NAME(prefix) CONCAT_LABEL(prefix##_, __LINE__)
// Delete all four copy/move special members at once.
#define NULL_COPY_AND_ASSIGN(ClassName) \
ClassName(const ClassName&) = delete; \
void operator=(const ClassName&) = delete; \
ClassName(ClassName&&) = delete; \
void operator=(ClassName&&) = delete
// Default all four copy/move special members at once.
#define DEFAULT_COPY_AND_ASSIGN(ClassName) \
ClassName(const ClassName&) = default; \
ClassName& operator=(const ClassName&) = default; \
ClassName(ClassName&&) = default; \
ClassName& operator=(ClassName&&) = default
|
whupdup/frame
|
real/core/common.hpp
|
C++
|
gpl-3.0
| 1,605
|
#pragma once
#include <cstdint>
#include <functional>
namespace ZN::Event {
// Minimal signal/slot dispatcher: an intrusive singly-linked list of
// std::function callbacks. Move-only; Connection nodes are heap-allocated,
// owned by the dispatcher, and freed by disconnect()/clean_up().
template <typename... Args>
class Dispatcher {
public:
using function_type = void(Args...);
Dispatcher() = default;
Dispatcher(Dispatcher&& other) noexcept
: m_head(other.m_head) {
other.m_head = nullptr;
}
Dispatcher& operator=(Dispatcher&& other) noexcept {
// Release our own connections before stealing the other list.
clean_up();
m_head = other.m_head;
other.m_head = nullptr;
return *this;
}
Dispatcher(const Dispatcher&) = delete;
void operator=(const Dispatcher&) = delete;
// One registered callback; node in the intrusive list. Only Dispatcher
// can create or access these — callers treat the pointer as opaque.
class Connection {
// NOTE(review): `func` is a forwarding reference but is copied, not
// std::forward'ed, into m_function — move-only functors won't compile.
template <typename Functor>
explicit Connection(Functor&& func, Connection* next)
: m_function(func)
, m_next(next) {}
std::function<function_type> m_function;
Connection* m_next;
friend class Dispatcher;
};
~Dispatcher() {
clean_up();
}
// Registers `func` at the head of the list; returns a handle for
// disconnect(). Newest connections therefore fire first.
template <typename Functor>
Connection* connect(Functor&& func) {
m_head = new Connection(func, m_head);
return m_head;
}
// Unlinks and frees `con`.
// NOTE(review): unconditionally dereferences m_head->m_next below, so
// calling this on an empty dispatcher is UB — confirm callers only pass
// handles obtained from this dispatcher while still connected.
void disconnect(Connection* con) {
if (con == m_head) {
m_head = m_head->m_next;
delete con;
return;
}
auto* last = m_head;
auto* curr = m_head->m_next;
while (curr) {
if (curr == con) {
last->m_next = con->m_next;
delete con;
return;
}
last = curr;
curr = curr->m_next;
}
}
// Invokes every connected callback with `args`.
// NOTE(review): std::forward here can move rvalue arguments into the
// FIRST callback, leaving later callbacks with moved-from values when
// more than one connection exists — confirm intended usage.
template <typename... Args2>
void fire(Args2&&... args) {
auto* curr = m_head;
while (curr) {
curr->m_function(std::forward<Args2>(args)...);
curr = curr->m_next;
}
}
bool empty() const {
return m_head == nullptr;
}
private:
Connection* m_head = nullptr;
// Frees every node in the list and resets the head.
void clean_up() {
auto* curr = m_head;
while (curr) {
auto* next = curr->m_next;
delete curr;
curr = next;
}
m_head = nullptr;
}
};
}
|
whupdup/frame
|
real/core/events.hpp
|
C++
|
gpl-3.0
| 1,774
|
#pragma once
#include <cstddef>
#include <iterator>
namespace ZN {
// std::array-alike aggregate: fixed-size inline storage with the standard
// container typedefs and iterator surface. Being an aggregate, it supports
// brace-init (FixedArray{1, 2, 3}) via the deduction guide below.
template <typename T, size_t N>
struct FixedArray {
using value_type = T;
using size_type = size_t;
using difference_type = ptrdiff_t;
using reference = value_type&;
using const_reference = const value_type&;
using pointer = value_type*;
using const_pointer = const value_type*;
using iterator = pointer;
using const_iterator = const_pointer;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
// Public so aggregate initialization works; prefer the accessors below.
T m_data[N];
constexpr iterator begin() noexcept {
return m_data;
}
constexpr iterator end() noexcept {
return m_data + N;
}
constexpr const_iterator begin() const noexcept {
return m_data;
}
constexpr const_iterator end() const noexcept {
return m_data + N;
}
constexpr reverse_iterator rbegin() noexcept {
return reverse_iterator{m_data + N};
}
constexpr reverse_iterator rend() noexcept {
return reverse_iterator{m_data};
}
constexpr const_reverse_iterator rbegin() const noexcept {
return const_reverse_iterator{m_data + N};
}
constexpr const_reverse_iterator rend() const noexcept {
return const_reverse_iterator{m_data};
}
// Unchecked element access, mirroring std::array.
constexpr reference operator[](size_type index) {
return m_data[index];
}
constexpr const_reference operator[](size_type index) const {
return m_data[index];
}
constexpr reference front() {
return *m_data;
}
constexpr const_reference front() const {
return *m_data;
}
constexpr reference back() {
return m_data[N - 1];
}
constexpr const_reference back() const {
return m_data[N - 1];
}
constexpr bool empty() const {
return N == 0;
}
constexpr pointer data() {
return m_data;
}
constexpr const_pointer data() const {
return m_data;
}
constexpr size_type size() const {
return N;
}
};
// Deduce element type and count from a brace list, as std::array does.
template <typename T, typename... U>
FixedArray(T, U...) -> FixedArray<T, 1 + sizeof...(U)>;
}
|
whupdup/frame
|
real/core/fixed_array.hpp
|
C++
|
gpl-3.0
| 1,956
|
#pragma once
#include <cstddef>
namespace ZN {
// Compile-time string wrapper: copies a character-array literal (including
// its terminator) into the object so string literals can be used as
// non-type template parameters / constexpr values.
template <typename T, size_t N>
struct BasicFixedString {
constexpr BasicFixedString(const T (&src)[N]) {
for (size_t i = 0; i < N; ++i) {
m_data[i] = src[i];
}
}
T m_data[N] = {};
// Implicit decay to a pointer, mirroring raw array semantics.
constexpr operator const T*() const {
return m_data;
}
};
template <size_t N>
using FixedString = BasicFixedString<char, N>;
}
|
whupdup/frame
|
real/core/fixed_string.hpp
|
C++
|
gpl-3.0
| 377
|
#pragma once
#include <core/memory.hpp>
#include <iterator>
#include <type_traits>
namespace ZN {
// Fixed-capacity, inline-storage vector. Elements live in an aligned raw
// byte buffer; lifetimes are managed manually through the ZN::Memory
// helpers. No capacity checks are performed: callers must keep
// size() <= Capacity (unchanged contract).
// NOTE(review): data()'s reinterpret_cast is not valid in constant
// evaluation, so the constexpr qualifiers are aspirational at compile time.
template <typename T, size_t Capacity>
class FixedVector {
public:
	using value_type = T;
	using size_type = size_t;
	using difference_type = ptrdiff_t;
	using reference = value_type&;
	using const_reference = const value_type&;
	using pointer = value_type*;
	using const_pointer = const value_type*;
	using iterator = pointer;
	using const_iterator = const_pointer;
	using reverse_iterator = std::reverse_iterator<iterator>;
	using const_reverse_iterator = std::reverse_iterator<const_iterator>;
	constexpr FixedVector() noexcept = default;
	// Constructs `count` default-constructed elements.
	constexpr explicit FixedVector(size_type count) noexcept
		requires std::is_default_constructible_v<T>
		: m_size(count) {
		Memory::uninitialized_default_construct_n(data(), m_size);
	}
	constexpr FixedVector(FixedVector&& other) noexcept
		requires std::is_move_constructible_v<T>
		: m_size(other.m_size) {
		Memory::uninitialized_move_n(other.data(), other.m_size, data());
		other.m_size = 0;
	}
	constexpr ~FixedVector() noexcept {
		Memory::destroy_n(data(), m_size);
	}
	constexpr FixedVector(const FixedVector& other) noexcept
		requires std::is_copy_constructible_v<T>
		: m_size(other.m_size) {
		Memory::uninitialized_copy_n(other.data(), other.m_size, data());
	}
	constexpr FixedVector& operator=(FixedVector&& other) noexcept
		requires std::is_move_constructible_v<T> {
		if (this != &other) {
			// Destroy our elements first, then move-construct into the
			// now-raw storage.
			Memory::destroy_n(data(), m_size);
			Memory::uninitialized_move_n(other.data(), other.m_size, data());
			m_size = other.m_size;
			other.m_size = 0;
		}
		return *this;
	}
	constexpr FixedVector& operator=(const FixedVector& other) noexcept
		requires std::is_copy_constructible_v<T> {
		if (this != &other) {
			Memory::destroy_n(data(), m_size);
			Memory::uninitialized_copy_n(other.data(), other.m_size, data());
			m_size = other.m_size;
		}
		return *this;
	}
	constexpr iterator begin() noexcept {
		return data();
	}
	constexpr iterator end() noexcept {
		return data() + m_size;
	}
	constexpr const_iterator begin() const noexcept {
		return data();
	}
	constexpr const_iterator end() const noexcept {
		return data() + m_size;
	}
	constexpr reverse_iterator rbegin() noexcept {
		return reverse_iterator{data() + m_size};
	}
	constexpr reverse_iterator rend() noexcept {
		return reverse_iterator{data()};
	}
	constexpr const_reverse_iterator rbegin() const noexcept {
		return const_reverse_iterator{data() + m_size};
	}
	constexpr const_reverse_iterator rend() const noexcept {
		return const_reverse_iterator{data()};
	}
	constexpr void push_back(value_type&& value) {
		Memory::construct_at(data() + m_size, std::move(value));
		++m_size;
	}
	constexpr void push_back(const value_type& value) {
		Memory::construct_at(data() + m_size, value);
		++m_size;
	}
	template <typename... Args>
	constexpr void emplace_back(Args&&... args) {
		Memory::construct_at(data() + m_size, std::forward<Args>(args)...);
		++m_size;
	}
	constexpr void pop_back() {
		// Fix: decrement BEFORE destroying — the last element lives at
		// data()[m_size - 1]. The previous code destroyed the uninitialized
		// slot one past the end (UB) and leaked the real last element.
		--m_size;
		Memory::destroy_at(data() + m_size);
	}
	constexpr void clear() {
		Memory::destroy_n(data(), m_size);
		m_size = 0;
	}
	// Unchecked element access, mirroring std::vector.
	constexpr reference operator[](size_type index) {
		return data()[index];
	}
	constexpr const_reference operator[](size_type index) const {
		return data()[index];
	}
	constexpr reference front() {
		return *data();
	}
	constexpr const_reference front() const {
		return *data();
	}
	constexpr reference back() {
		return data()[m_size - 1];
	}
	constexpr const_reference back() const {
		return data()[m_size - 1];
	}
	constexpr bool empty() const {
		return m_size == 0;
	}
	constexpr pointer data() {
		return reinterpret_cast<pointer>(m_data);
	}
	constexpr const_pointer data() const {
		return reinterpret_cast<const_pointer>(m_data);
	}
	constexpr size_type size() const {
		return m_size;
	}
	constexpr size_type capacity() const {
		return Capacity;
	}
private:
	alignas(T) uint8_t m_data[Capacity * sizeof(T)] = {};
	size_t m_size = 0;
};
}
|
whupdup/frame
|
real/core/fixed_vector.hpp
|
C++
|
gpl-3.0
| 4,186
|
#pragma once
#include <cstdint>
namespace ZN {
// Incremental 64-bit FNV-1a-style hasher: every add_* feeds 32-bit words
// into the running hash. Strings are prefixed with a 0xFF separator word so
// adjacent fields cannot collide by concatenation.
class HashBuilder {
public:
using Hash_T = uint64_t;
// Core mixing step: multiply by the 64-bit FNV prime, then XOR the word.
constexpr HashBuilder& add_uint32(uint32_t value) {
m_hash = static_cast<Hash_T>(m_hash * 0x100000001B3ull) ^ static_cast<Hash_T>(value);
return *this;
}
constexpr HashBuilder& add_int32(int32_t value) {
add_uint32(static_cast<uint32_t>(value));
return *this;
}
// NOTE(review): union type punning is not permitted in constant
// evaluation, so despite `constexpr` this member cannot actually be used
// at compile time — std::bit_cast would fix that if C++20 is available.
constexpr HashBuilder& add_float(float value) {
union {
float f;
uint32_t i;
} punnedValue;
punnedValue.f = value;
add_uint32(punnedValue.i);
return *this;
}
// 64-bit values are fed as two 32-bit words, low half first.
constexpr HashBuilder& add_uint64(uint64_t value) {
add_uint32(static_cast<uint32_t>(value & 0xFF'FF'FF'FFu));
add_uint32(static_cast<uint32_t>(value >> 32));
return *this;
}
constexpr HashBuilder& add_int64(int64_t value) {
add_uint64(static_cast<uint64_t>(value));
return *this;
}
template <typename T>
constexpr HashBuilder& add_pointer(T* value) {
add_uint64(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(value)));
return *this;
}
// NUL-terminated string, one word per character after the separator.
constexpr HashBuilder& add_string(const char* str) {
add_uint32(0xFFu);
while (*str) {
add_uint32(static_cast<uint32_t>(*str));
++str;
}
return *this;
}
constexpr HashBuilder& add_string(const char* str, size_t size) {
add_uint32(0xFFu);
for (auto* end = str + size; str != end; ++str) {
add_uint32(static_cast<uint32_t>(*str));
}
return *this;
}
template <typename StringLike>
constexpr HashBuilder& add_string(const StringLike& str) {
add_uint32(0xFFu);
for (auto c : str) {
add_uint32(static_cast<uint32_t>(c));
}
return *this;
}
constexpr Hash_T get() const {
return m_hash;
}
private:
// FNV-1a 64-bit offset basis (the original comment said "FNV-11a").
Hash_T m_hash = 0xCBF29CE484222325ull;
};
}
|
whupdup/frame
|
real/core/hash_builder.hpp
|
C++
|
gpl-3.0
| 1,806
|
#pragma once
#include <core/common.hpp>
#include <atomic>
#include <memory>
#include <utility>
namespace ZN::Memory {
// Non-atomic reference counter for single-threaded use. Starts at 1 (the
// creating reference). release() returns true when the count hits zero.
class SingleThreadCounter {
public:
void add_ref() {
++m_count;
}
bool release() {
--m_count;
return m_count == 0;
}
private:
size_t m_count = 1;
};
// Atomic reference counter for cross-thread sharing. Starts at 1.
class MultiThreadCounter {
public:
MultiThreadCounter() {
m_count.store(1, std::memory_order_relaxed);
}
void add_ref() {
// Relaxed is sufficient for increments: no ordering is needed to take
// an additional reference to an already-owned object.
m_count.fetch_add(1, std::memory_order_relaxed);
}
bool release() {
// acq_rel orders prior writes before the (possible) destruction that
// the last releaser performs.
auto result = m_count.fetch_sub(1, std::memory_order_acq_rel);
return result == 1;
}
private:
std::atomic_size_t m_count;
};
template <typename T>
class IntrusivePtr;
// CRTP base: derive as `class Foo : public IntrusivePtrEnabled<Foo>` to make
// Foo reference-counted. The embedded counter starts at 1, representing the
// reference held by whoever created the object.
template <typename T, typename Deletor = std::default_delete<T>,
typename Counter = SingleThreadCounter>
class IntrusivePtrEnabled {
public:
using pointer_type = IntrusivePtr<T>;
using base_type = T;
using deletor_type = Deletor;
using counter_type = Counter;
explicit IntrusivePtrEnabled() = default;
NULL_COPY_AND_ASSIGN(IntrusivePtrEnabled);
// Drops one reference; the last release deletes the object via Deletor.
void release_ref() {
if (m_counter.release()) {
Deletor{}(static_cast<T*>(this));
}
}
void add_ref() {
m_counter.add_ref();
}
// Returns a new owning reference to this object (bumps the count).
IntrusivePtr<T> reference_from_this();
private:
Counter m_counter;
};
// Smart pointer over IntrusivePtrEnabled-derived objects. The count lives in
// the pointee, so this is pointer-sized. Copying add_refs; destruction and
// reset() release_ref.
template <typename T>
class IntrusivePtr {
public:
template <typename U>
friend class IntrusivePtr;
IntrusivePtr() = default;
IntrusivePtr(std::nullptr_t)
: m_data(nullptr) {}
// ADOPTS `handle` without add_ref — the caller's reference (e.g. the
// initial count of a freshly new'd object) is transferred in.
template <typename U>
explicit IntrusivePtr(U* handle)
: m_data(static_cast<T*>(handle)) {}
T& operator*() const noexcept {
return *m_data;
}
T* operator->() const noexcept {
return m_data;
}
explicit operator bool() const {
return m_data != nullptr;
}
bool operator==(const IntrusivePtr& other) const {
return m_data == other.m_data;
}
bool operator!=(const IntrusivePtr& other) const {
return m_data != other.m_data;
}
T* get() const noexcept {
return m_data;
}
// Releases the held reference (if any) and becomes null.
void reset() {
using ReferenceBase = IntrusivePtrEnabled<typename T::base_type,
typename T::deletor_type, typename T::counter_type>;
if (m_data) {
static_cast<ReferenceBase*>(m_data)->release_ref();
}
m_data = nullptr;
}
// Converting copy-assign: shares ownership (add_ref on the new pointee).
template <typename U>
IntrusivePtr& operator=(const IntrusivePtr<U>& other) {
static_assert(std::is_base_of<T, U>::value,
"Cannot safely assign downcasted intrusive pointers");
using ReferenceBase = IntrusivePtrEnabled<typename T::base_type,
typename T::deletor_type, typename T::counter_type>;
reset();
m_data = static_cast<T*>(other.m_data);
if (m_data) {
static_cast<ReferenceBase*>(m_data)->add_ref();
}
return *this;
}
IntrusivePtr& operator=(const IntrusivePtr& other) {
using ReferenceBase = IntrusivePtrEnabled<typename T::base_type,
typename T::deletor_type, typename T::counter_type>;
if (this != &other) {
reset();
m_data = other.m_data;
if (m_data) {
static_cast<ReferenceBase*>(m_data)->add_ref();
}
}
return *this;
}
template <typename U>
IntrusivePtr(const IntrusivePtr<U>& other) {
*this = other;
}
IntrusivePtr(const IntrusivePtr& other) {
*this = other;
}
~IntrusivePtr() {
reset();
}
// Converting move-assign: steals the reference; no count traffic.
template <typename U>
IntrusivePtr& operator=(IntrusivePtr<U>&& other) noexcept {
reset();
m_data = other.m_data;
other.m_data = nullptr;
return *this;
}
IntrusivePtr& operator=(IntrusivePtr&& other) noexcept {
if (this != &other) {
reset();
m_data = other.m_data;
other.m_data = nullptr;
}
return *this;
}
template <typename U>
IntrusivePtr(IntrusivePtr<U>&& other) noexcept {
*this = std::move(other);
}
IntrusivePtr(IntrusivePtr&& other) noexcept {
*this = std::move(other);
}
// Address-based ordering, mirroring shared_ptr::owner_before.
template <typename U>
bool owner_before(const IntrusivePtr<U>& other) const noexcept {
return m_data < other.m_data;
}
template <typename U>
bool operator<(const IntrusivePtr<U>& other) const noexcept {
return owner_before(other);
}
private:
T* m_data = nullptr;
};
// Bumps the count and wraps `this` — valid only on objects already managed
// by an IntrusivePtr (the constructor adopts the added reference).
template <typename T, typename Deletor, typename Counter>
IntrusivePtr<T> IntrusivePtrEnabled<T, Deletor, Counter>::reference_from_this() {
add_ref();
return IntrusivePtr<T>(static_cast<T*>(this));
}
// make_shared analogue: constructs T and adopts its initial reference.
template <typename T, typename... Args>
inline IntrusivePtr<T> make_intrusive(Args&&... args) {
return IntrusivePtr<T>(new T(std::forward<Args>(args)...));
}
// Variant constructing a derived U but returning T's pointer type.
template <typename T, typename U, typename... Args>
inline typename T::pointer_type make_intrusive(Args&&... args) {
return typename T::pointer_type(new U(std::forward<Args>(args)...));
}
// static_pointer_cast analogue: add_refs the source (the new pointer adopts
// that extra reference), so both pointers independently own the object.
template <typename T, typename U>
inline Memory::IntrusivePtr<T> intrusive_pointer_cast(Memory::IntrusivePtr<U> p) {
using ReferenceBaseT = IntrusivePtrEnabled<typename T::base_type,
typename T::deletor_type, typename T::counter_type>;
using ReferenceBaseU = IntrusivePtrEnabled<typename U::base_type,
typename U::deletor_type, typename U::counter_type>;
static_assert(std::is_same_v<ReferenceBaseT, ReferenceBaseU>,
"Downcasting IntrusivePtr is only safe if they share the same reference base");
if (p) {
static_cast<ReferenceBaseU*>(p.get())->add_ref();
}
return Memory::IntrusivePtr<T>(static_cast<T*>(p.get()));
}
// Convenience alias selecting the atomic counter.
template <typename T>
using ThreadSafeIntrusivePtrEnabled = IntrusivePtrEnabled<T, std::default_delete<T>,
MultiThreadCounter>;
template <typename T>
IntrusivePtr(T*) -> IntrusivePtr<T>;
}
namespace ZN {
// Promote the pointer type into the main ZN namespace for convenience.
using Memory::IntrusivePtr;
}
namespace std {
// Hash by raw pointee address, matching std::hash<shared_ptr> semantics.
template <typename T>
struct hash<ZN::Memory::IntrusivePtr<T>> {
size_t operator()(const ZN::Memory::IntrusivePtr<T>& p) const {
return std::hash<T*>{}(p.get());
}
};
}
|
whupdup/frame
|
real/core/intrusive_ptr.hpp
|
C++
|
gpl-3.0
| 5,721
|
#pragma once
#include <cassert>
#include <cstdint>
#include <new>
#include <utility>
namespace ZN {
// Manually-initialized in-place storage for a T (an optional without the
// flagless overhead abstraction): create()/destroy() control the lifetime
// explicitly, and the destructor cleans up a still-live value. Non-copyable,
// non-movable, intended for statics/globals with controlled init order.
template <typename T>
class Local {
public:
Local() noexcept = default;
Local(const Local&) = delete;
void operator=(const Local&) = delete;
Local(Local&&) = delete;
void operator=(Local&&) = delete;
// Placement-constructs the value; must not already hold one.
template <typename... Args>
void create(Args&&... args) noexcept {
assert(!ptr && "Local::create(): ptr must be null");
ptr = new (data) T(std::forward<Args>(args)...);
}
// Destroys the held value; must currently hold one.
void destroy() noexcept {
assert(ptr && "Local::destroy(): ptr must not be null");
ptr->~T();
ptr = nullptr;
}
T& operator*() noexcept {
assert(ptr && "Local::operator*(): ptr must not be null");
return *ptr;
}
T* operator->() const noexcept {
assert(ptr && "Local::operator->(): ptr must not be null");
return ptr;
}
T* get() const noexcept {
return ptr;
}
operator bool() const noexcept {
return ptr != nullptr;
}
~Local() noexcept {
if (ptr) {
destroy();
}
}
private:
// Raw aligned storage; `ptr` doubles as the "is constructed" flag.
alignas(T) uint8_t data[sizeof(T)] = {};
T* ptr = nullptr;
};
}
|
whupdup/frame
|
real/core/local.hpp
|
C++
|
gpl-3.0
| 1,110
|
#include "logging.hpp"
#include <core/threading.hpp>
#include <cstdarg>
#include <cstdio>
using namespace ZN;
// Printable names; order must match the ZN::LogLevel enumerator order.
static const char* LOG_LEVEL_STRING[] = {
"ERROR",
"WARNING",
"DEBUG",
"TEMP"
};
// Previous build-type-dependent default, kept for reference:
/*#ifdef NDEBUG
static LogLevel g_logLevel = LOG_LEVEL_ERROR;
#else
static LogLevel g_logLevel = LOG_LEVEL_TEMP;
#endif*/
// Current threshold: everything at or below this level is emitted.
static LogLevel g_logLevel = LogLevel::TEMP;
// printf-style sink behind the LOG_* macros: writes one prefixed line to
// stderr when `logLevel` passes the global threshold.
// NOTE(review): not synchronized — concurrent callers may interleave the
// prefix and message; confirm whether stderr line-buffering suffices here.
void ZN::log_format(LogLevel logLevel, const char* category, const char* file, unsigned line,
const char* fmt, ...) {
if (logLevel > g_logLevel) {
return;
}
// %llu assumes get_current_thread_id() yields unsigned long long —
// confirm against core/threading.hpp.
fprintf(stderr, "[%s] [%s] (#%llu) (%s:%u): ",
LOG_LEVEL_STRING[static_cast<unsigned>(logLevel)], category,
OS::Thread::get_current_thread_id(), file, line);
va_list args;
va_start(args, fmt);
vfprintf(stderr, fmt, args);
va_end(args);
fputc('\n', stderr);
}
// Sets the global logging threshold (messages above it are dropped).
void ZN::set_log_level(LogLevel logLevel) {
g_logLevel = logLevel;
}
|
whupdup/frame
|
real/core/logging.cpp
|
C++
|
gpl-3.0
| 878
|
#pragma once
namespace ZN {
// Severity levels, most severe first; the numeric order is relied upon by
// log_format's threshold comparison and by LOG_LEVEL_STRING indexing.
// NOTE(review): ERROR and DEBUG are common platform macro names (e.g. from
// windows.h); the enum-class scoping protects usages, but confirm no TU
// includes such headers before this one in macro-expanding contexts.
enum class LogLevel {
ERROR = 0,
WARNING,
DEBUG,
TEMP,
COUNT
};
}
// Core macro: captures call-site file/line for the log prefix.
// NOTE(review): ##__VA_ARGS__ (swallow the comma when no varargs) is a
// GNU extension also honored by MSVC — confirm against supported compilers.
#define LOG(level, category, fmt, ...) \
ZN::log_format(level, category, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
#define LOG_ERROR(category, fmt, ...) \
LOG(ZN::LogLevel::ERROR, category, fmt, ##__VA_ARGS__)
// *2 variants take a plain message, avoiding format-string injection when
// logging arbitrary text.
#define LOG_ERROR2(category, msg) \
LOG(ZN::LogLevel::ERROR, category, "%s", msg)
#define LOG_WARNING(category, fmt, ...) \
LOG(ZN::LogLevel::WARNING, category, fmt, ##__VA_ARGS__)
#define LOG_WARNING2(category, msg) \
LOG(ZN::LogLevel::WARNING, category, "%s", msg)
#define LOG_DEBUG(category, fmt, ...) \
LOG(ZN::LogLevel::DEBUG, category, fmt, ##__VA_ARGS__)
// TEMP: throwaway developer logging, highest (least severe) level.
#define LOG_TEMP(fmt, ...) \
LOG(ZN::LogLevel::TEMP, "TEMP", fmt, ##__VA_ARGS__)
#define LOG_TEMP2(msg) \
LOG(ZN::LogLevel::TEMP, "TEMP", "%s", msg)
namespace ZN {
// Sink used by the macros above; printf-style varargs.
void log_format(LogLevel logLevel, const char* category, const char* file,
unsigned line, const char* fmt, ...);
// Sets the global severity threshold.
void set_log_level(LogLevel logLevel);
}
|
whupdup/frame
|
real/core/logging.hpp
|
C++
|
gpl-3.0
| 1,012
|
#pragma once
#include <memory>
namespace ZN::Memory {
// Thin aliases over <memory> so the codebase depends on ZN::Memory rather
// than on std directly (eases swapping in custom implementations later).
using std::construct_at;
using std::uninitialized_move;
using std::uninitialized_move_n;
using std::uninitialized_copy;
using std::uninitialized_copy_n;
using std::uninitialized_default_construct;
using std::uninitialized_default_construct_n;
using std::destroy;
using std::destroy_at;
using std::destroy_n;
template <typename T>
using SharedPtr = std::shared_ptr<T>;
template <typename T>
using UniquePtr = std::unique_ptr<T>;
using std::static_pointer_cast;
}
|
whupdup/frame
|
real/core/memory.hpp
|
C++
|
gpl-3.0
| 524
|
#pragma once
namespace ZN {
// Minimal aggregate pair (unlike std::pair it is trivially copyable when
// K and V are, and supports designated initializers).
template <typename K, typename V>
struct Pair {
K first;
V second;
};
// Deduce member types from a brace list: Pair{1, 2.0} -> Pair<int, double>.
template <typename K, typename V>
Pair(K, V) -> Pair<K, V>;
}
|
whupdup/frame
|
real/core/pair.hpp
|
C++
|
gpl-3.0
| 167
|
#pragma once
#include <core/memory.hpp>
#include <cassert>
#include <cstdlib>
namespace ZN {
template <typename T>
class Queue;
// Index-based iterator over Queue's ring buffer: dereferencing defers to
// Queue::operator[], which applies the wrap-around arithmetic.
// NOTE(review): no iterator_category/pointer typedefs, so std algorithms
// relying on iterator_traits may reject it — confirm range-for is the only
// intended use.
template <typename T>
struct QueueIterator {
using container_type = Queue<T>;
using value_type = T;
using reference = T&;
using size_type = size_t;
using difference_type = ptrdiff_t;
QueueIterator& operator++() {
++m_index;
return *this;
}
reference operator*() const {
return (*m_container)[m_index];
}
bool operator==(const QueueIterator& other) const {
return m_container == other.m_container && m_index == other.m_index;
}
bool operator!=(const QueueIterator& other) const {
return !(*this == other);
}
container_type* m_container;
size_type m_index;
};
// Ring-buffer FIFO queue. Storage is raw malloc'd memory; element lifetimes
// are managed explicitly through ZN::Memory helpers. Move-only.
//
// Fixes relative to the previous revision:
//  * the destructor and reallocation destroyed the ENTIRE capacity range,
//    including slots that were never constructed (UB for non-trivial T);
//    only live elements are destroyed now,
//  * move-assignment leaked the destination's storage and left the source
//    with stale size/front/back fields,
//  * clear() used incorrect ranges (the wrapped case destroyed
//    [front, size) instead of [front, cap), and an empty non-wrapped queue
//    produced an invalid end-before-begin range),
//  * growing a full queue of capacity 1 moved the single element twice
//    (the wrap test was m_front < m_back instead of checking contiguity).
template <typename T>
class Queue {
public:
	using value_type = T;
	using size_type = size_t;
	using reference = value_type&;
	using const_reference = const value_type&;
	using pointer = T*;
	using iterator = QueueIterator<value_type>;
	using const_iterator = QueueIterator<const value_type>;

	// Empty queue. m_back starts at SIZE_MAX so the first emplace_back
	// wraps it to index 0.
	constexpr explicit Queue()
			: m_data(nullptr)
			, m_dataEnd(nullptr)
			, m_size(0)
			, m_front(0)
			, m_back(static_cast<size_t>(0ull) - static_cast<size_t>(1ull)) {}

	~Queue() {
		if (m_data) {
			destroy_live();
			std::free(m_data);
		}
	}

	Queue& operator=(Queue&& other) noexcept {
		if (this != &other) {
			// Release our own elements and storage before stealing other's.
			if (m_data) {
				destroy_live();
				std::free(m_data);
			}
			m_data = other.m_data;
			m_dataEnd = other.m_dataEnd;
			m_size = other.m_size;
			m_front = other.m_front;
			m_back = other.m_back;
			// Leave `other` in a valid empty state.
			other.m_data = nullptr;
			other.m_dataEnd = nullptr;
			other.m_size = 0;
			other.m_front = 0;
			other.m_back = static_cast<size_t>(0ull) - static_cast<size_t>(1ull);
		}
		return *this;
	}

	// Direct member init (cannot delegate to operator=, which would try to
	// release uninitialized members).
	Queue(Queue&& other) noexcept
			: m_data(other.m_data)
			, m_dataEnd(other.m_dataEnd)
			, m_size(other.m_size)
			, m_front(other.m_front)
			, m_back(other.m_back) {
		other.m_data = nullptr;
		other.m_dataEnd = nullptr;
		other.m_size = 0;
		other.m_front = 0;
		other.m_back = static_cast<size_t>(0ull) - static_cast<size_t>(1ull);
	}

	Queue(const Queue&) = delete;
	void operator=(const Queue&) = delete;

	// Constructs a new element at the back, growing storage if full.
	template <typename... Args>
	void emplace_back(Args&&... args) {
		ensure_capacity();
		m_back = (m_back + 1) % capacity();
		Memory::construct_at(&m_data[m_back], std::forward<Args>(args)...);
		++m_size;
	}

	void push_back(const T& value) {
		emplace_back(value);
	}

	void push_back(T&& value) {
		emplace_back(std::move(value));
	}

	// Destroys the front element. Precondition: !empty().
	void pop_front() {
		assert(!empty() && "pop_front(): Attempt to pop an empty queue");
		Memory::destroy_at(&m_data[m_front]);
		m_front = (m_front + 1) % capacity();
		--m_size;
	}

	// Destroys all live elements; capacity is retained.
	void clear() {
		destroy_live();
		m_front = 0;
		// capacity() == 0 yields SIZE_MAX here, matching the default state.
		m_back = capacity() - 1;
		m_size = 0;
	}

	iterator begin() {
		return iterator{this, 0};
	}

	iterator end() {
		return iterator{this, m_size};
	}

	// index is logical (0 == front); mapped onto the ring buffer.
	reference operator[](size_type index) {
		assert(!empty() && "Queue::operator[]: attempt to access an empty queue");
		return m_data[(m_front + index) % capacity()];
	}

	const_reference operator[](size_type index) const {
		assert(!empty() && "Queue::operator[]: attempt to access an empty queue");
		return m_data[(m_front + index) % capacity()];
	}

	reference front() {
		assert(!empty() && "Queue::front(): attempt to access an empty queue");
		return m_data[m_front];
	}

	const_reference front() const {
		assert(!empty() && "Queue::front(): attempt to access an empty queue");
		return m_data[m_front];
	}

	reference back() {
		assert(!empty() && "Queue::back(): attempt to access an empty queue");
		return m_data[m_back];
	}

	const_reference back() const {
		assert(!empty() && "Queue::back(): attempt to access an empty queue");
		return m_data[m_back];
	}

	bool empty() const {
		return m_size == 0;
	}

	size_type size() const {
		return m_size;
	}

	size_type capacity() const {
		return m_dataEnd - m_data;
	}

private:
	T* m_data;
	T* m_dataEnd;
	size_type m_size;
	size_type m_front;
	size_type m_back;

	// Destroys exactly the live elements [m_front, m_front + m_size), which
	// may wrap around the end of the buffer. Does not touch bookkeeping.
	void destroy_live() {
		if (m_size == 0) {
			return;
		}
		auto cap = capacity();
		if (m_front + m_size <= cap) {
			Memory::destroy(m_data + m_front, m_data + m_front + m_size);
		}
		else {
			Memory::destroy(m_data + m_front, m_dataEnd);
			Memory::destroy(m_data, m_data + (m_front + m_size - cap));
		}
	}

	// Doubles capacity (1 on first allocation) when the buffer is full.
	void ensure_capacity() {
		auto cap = capacity();
		if (m_size == cap) {
			reserve_elements(cap == 0 ? 1 : 2 * cap);
		}
	}

	// Moves live elements into a fresh linear buffer of numElements slots.
	// NOTE(review): malloc failure is unchecked, as before.
	void reserve_elements(size_t numElements) {
		auto* newBegin = reinterpret_cast<T*>(std::malloc(numElements * sizeof(T)));
		auto* newEnd = newBegin + numElements;
		if (m_size > 0) {
			auto cap = capacity();
			if (m_front + m_size <= cap) {
				// Live range is contiguous.
				Memory::uninitialized_move(m_data + m_front,
						m_data + m_front + m_size, newBegin);
			}
			else {
				// Live range wraps: move the tail segment, then the head.
				Memory::uninitialized_move(m_data + m_front, m_dataEnd, newBegin);
				Memory::uninitialized_move(m_data, m_data + (m_front + m_size - cap),
						newBegin + (cap - m_front));
			}
			destroy_live();
			m_front = 0;
			m_back = m_size - 1;
		}
		std::free(m_data);
		m_data = newBegin;
		m_dataEnd = newEnd;
	}
};
}
|
whupdup/frame
|
real/core/queue.hpp
|
C++
|
gpl-3.0
| 4,666
|
#pragma once
#include <core/intrusive_ptr.hpp>
namespace ZN {
// RAII lock guard over an intrusively ref-counted mutex. Holds a reference
// to the mutex so it cannot be destroyed while locked. Not copyable.
template <typename MutexType>
class ScopedLock final {
public:
	// Acquires the mutex immediately.
	explicit ScopedLock(MutexType& mutex)
			: m_mutex(mutex.reference_from_this())
			, m_locked(false) {
		lock();
	}

	// Releases the mutex if still held.
	~ScopedLock() {
		unlock();
	}

	NULL_COPY_AND_ASSIGN(ScopedLock);

	// Blocks until the mutex is acquired; no-op when already held.
	void lock() {
		if (!m_locked) {
			m_mutex->lock();
			m_locked = true;
		}
	}

	// Attempts to acquire without blocking; returns whether the lock was
	// taken by this call. Returns false when already held.
	bool try_lock() {
		if (!m_locked) {
			// BUGFIX: this previously called ScopedLock::try_lock() itself,
			// recursing forever; forward to the underlying mutex instead.
			m_locked = m_mutex->try_lock();
			return m_locked;
		}
		return false;
	}

	void unlock() {
		if (m_locked) {
			m_mutex->unlock();
			m_locked = false;
		}
	}

	bool owns_lock() const {
		return m_locked;
	}

	operator bool() const {
		return owns_lock();
	}

private:
	Memory::IntrusivePtr<MutexType> m_mutex;
	bool m_locked;
};
}
|
whupdup/frame
|
real/core/scoped_lock.hpp
|
C++
|
gpl-3.0
| 804
|
#pragma once
#include <cstddef>
#include <iterator>
namespace ZN {
// Non-owning view over a contiguous sequence of T, in the spirit of
// std::span without a static extent. Holds only a pointer and a length;
// the viewed storage must outlive the Span.
template <typename T>
class Span {
public:
	using value_type = T;
	using size_type = size_t;
	using difference_type = ptrdiff_t;
	using reference = value_type&;
	using const_reference = const value_type&;
	using pointer = value_type*;
	using const_pointer = const value_type*;
	using iterator = pointer;
	using const_iterator = const_pointer;
	using reverse_iterator = std::reverse_iterator<iterator>;
	using const_reverse_iterator = std::reverse_iterator<const_iterator>;

	// Empty view.
	constexpr explicit Span() = default;
	constexpr Span(Span&&) noexcept = default;
	constexpr Span& operator=(Span&&) noexcept = default;
	constexpr Span(const Span&) noexcept = default;
	constexpr Span& operator=(const Span&) noexcept = default;

	// View over [first, first + count).
	template <typename It>
	constexpr Span(It first, size_type count)
			: m_data(first)
			, m_size(count) {}

	// View over [first, last).
	template <typename It>
	constexpr Span(It first, It last)
			: m_data(first)
			, m_size(last - first) {}

	// View over anything exposing data() and size() (vector, array, ...).
	template <typename Spannable>
	constexpr Span(Spannable& container)
			: m_data(container.data())
			, m_size(container.size()) {}

	template <typename Spannable>
	constexpr Span(const Spannable& container)
			: m_data(container.data())
			, m_size(container.size()) {}

	constexpr iterator begin() noexcept {
		return m_data;
	}
	constexpr iterator end() noexcept {
		return begin() + m_size;
	}
	constexpr const_iterator begin() const noexcept {
		return m_data;
	}
	constexpr const_iterator end() const noexcept {
		return begin() + m_size;
	}

	constexpr reference operator[](size_type index) {
		return *(m_data + index);
	}
	constexpr const_reference operator[](size_type index) const {
		return *(m_data + index);
	}

	constexpr reference front() {
		return m_data[0];
	}
	constexpr const_reference front() const {
		return m_data[0];
	}
	constexpr reference back() {
		return *(m_data + (m_size - 1));
	}
	constexpr const_reference back() const {
		return *(m_data + (m_size - 1));
	}

	constexpr pointer data() {
		return m_data;
	}
	constexpr const_pointer data() const {
		return m_data;
	}

	constexpr size_type size() const {
		return m_size;
	}
	constexpr bool empty() const {
		return m_size == 0;
	}

private:
	T* m_data = {};
	size_type m_size = {};
};
}
|
whupdup/frame
|
real/core/span.hpp
|
C++
|
gpl-3.0
| 2,296
|
#include "system_info.hpp"
#include <core/common.hpp>
using namespace ZN;
#if defined(OPERATING_SYSTEM_WINDOWS)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
// Cached result of GetSystemInfo(), filled lazily on first query.
// NOTE(review): the lazy init below is not synchronized — safe only if the
// first call happens before worker threads start; confirm.
static SYSTEM_INFO g_systemInfo;
static bool g_initialized = false;
static void try_init_system_info();
// Number of logical processors reported by the OS.
uint32_t SystemInfo::get_num_processors() {
try_init_system_info();
return static_cast<uint32_t>(g_systemInfo.dwNumberOfProcessors);
}
// Virtual-memory page size in bytes.
size_t SystemInfo::get_page_size() {
try_init_system_info();
return static_cast<size_t>(g_systemInfo.dwPageSize);
}
static void try_init_system_info() {
if (g_initialized) {
return;
}
g_initialized = true;
GetSystemInfo(&g_systemInfo);
}
#elif defined(OPERATING_SYSTEM_LINUX)
#include <sys/sysinfo.h>
#include <unistd.h>
// Lazily cached page size (getpagesize() never changes at runtime).
static size_t g_pageSize = 0;
uint32_t SystemInfo::get_num_processors() {
return static_cast<uint32_t>(get_nprocs());
}
size_t SystemInfo::get_page_size() {
// Benign race: concurrent first calls both write the same value.
if (g_pageSize == 0) {
g_pageSize = getpagesize();
}
return g_pageSize;
}
#endif
|
whupdup/frame
|
real/core/system_info.cpp
|
C++
|
gpl-3.0
| 993
|
#pragma once
#include <cstddef>
#include <cstdint>
namespace ZN::SystemInfo {
// Number of logical processors (hardware threads) available.
uint32_t get_num_processors();
// Size in bytes of a virtual-memory page on this system.
size_t get_page_size();
}
|
whupdup/frame
|
real/core/system_info.hpp
|
C++
|
gpl-3.0
| 140
|
#pragma once
#include <core/scoped_lock.hpp>
namespace ZN::OS {
// Signature of a thread entry point; receives the user pointer passed to
// Thread::create().
using ThreadProc = void(void*);
class Thread;
class Mutex;
class ConditionVariable;
// Custom deletor so the intrusive pointer can tear down the platform
// implementation (defined in threading_pthread.cpp / threading_win32.cpp).
struct ThreadDeletor {
void operator()(Thread*) const noexcept;
};
// Ref-counted OS thread handle. Construction only via create(); the
// concrete state lives in a platform-specific ThreadImpl subclass.
class Thread : public Memory::IntrusivePtrEnabled<Thread, ThreadDeletor,
Memory::MultiThreadCounter> {
public:
// Starts a new thread running threadProc(userData); null on failure.
static Memory::IntrusivePtr<Thread> create(ThreadProc* threadProc, void* userData);
static void sleep(uint64_t millis);
static uint64_t get_current_thread_id();
NULL_COPY_AND_ASSIGN(Thread);
void join();
void set_cpu_affinity(uint64_t affinityMask);
void block_signals();
private:
explicit Thread() = default;
friend struct ThreadImpl;
};
struct MutexDeletor {
void operator()(Mutex*) const noexcept;
};
// Ref-counted OS mutex; see ScopedLock for RAII usage.
class Mutex : public Memory::IntrusivePtrEnabled<Mutex, MutexDeletor, Memory::MultiThreadCounter> {
public:
static Memory::IntrusivePtr<Mutex> create();
NULL_COPY_AND_ASSIGN(Mutex);
void lock();
bool try_lock();
void unlock();
private:
explicit Mutex() = default;
friend struct MutexImpl;
};
struct ConditionVariableDeletor {
void operator()(ConditionVariable*) const noexcept;
};
// Ref-counted OS condition variable paired with Mutex.
class ConditionVariable : public Memory::IntrusivePtrEnabled<ConditionVariable,
ConditionVariableDeletor, Memory::MultiThreadCounter> {
public:
static Memory::IntrusivePtr<ConditionVariable> create();
NULL_COPY_AND_ASSIGN(ConditionVariable);
// Precondition: the mutex is held by the caller.
void wait(Mutex&);
void notify_one();
void notify_all();
private:
explicit ConditionVariable() = default;
friend struct ConditionVariableImpl;
};
}
|
whupdup/frame
|
real/core/threading.hpp
|
C++
|
gpl-3.0
| 1,583
|
#include "threading.hpp"

#include <cerrno>
#include <ctime>

#include <pthread.h>
#include <sched.h>
#include <unistd.h>
using namespace ZN;
using namespace ZN::OS;
// Platform-specific implementation structs; created by the create()
// factories and always accessed through static_cast from the base classes.
namespace ZN::OS {
struct ThreadImpl final : public Thread {
pthread_t handle;
ThreadProc* proc;
void* userData;
};
struct MutexImpl final : public Mutex {
pthread_mutex_t handle;
};
struct ConditionVariableImpl final : public ConditionVariable {
pthread_cond_t handle;
};
}
// Trampoline matching pthread's void*(void*) signature; unpacks ThreadImpl.
static void* thread_proc(void* userData);
// Thread
// Joins before deletion so a dropped last reference never leaves a running
// thread with a dangling ThreadImpl.
void ThreadDeletor::operator()(Thread* pThread) const noexcept {
if (pThread) {
pThread->join();
}
std::default_delete<ThreadImpl>{}(reinterpret_cast<ThreadImpl*>(pThread));
}
Memory::IntrusivePtr<Thread> Thread::create(ThreadProc* proc, void* userData) {
Memory::IntrusivePtr<Thread> res(new ThreadImpl);
auto& thread = *static_cast<ThreadImpl*>(res.get());
thread.proc = proc;
thread.userData = userData;
// NOTE(review): if pthread_create fails, `res` is destroyed and the
// deletor joins an uninitialized handle — consider guarding join().
if (pthread_create(&thread.handle, nullptr, thread_proc, &thread) != 0) {
return nullptr;
}
return res;
}
// Blocks the calling thread for at least `millis` milliseconds.
// usleep() is obsolescent in POSIX.1-2008 and is only required to support
// intervals below one second, so use nanosleep() and restart it when a
// signal interrupts the wait.
void Thread::sleep(uint64_t millis) {
	timespec req{};
	req.tv_sec = static_cast<time_t>(millis / 1000ull);
	req.tv_nsec = static_cast<long>((millis % 1000ull) * 1000000ull);
	timespec rem{};
	while (nanosleep(&req, &rem) != 0 && errno == EINTR) {
		req = rem;
	}
}
// Waits for the thread to finish. NOTE(review): pthread_join on an already
// joined thread is undefined — callers (including the deletor) must not
// join twice.
void Thread::join() {
// FIXME: this returns something, check it
pthread_join(static_cast<ThreadImpl*>(this)->handle, nullptr);
}
#if defined(OPERATING_SYSTEM_LINUX)
#include <sys/syscall.h>
#include <signal.h>
// Kernel thread id via raw syscall (glibc's gettid() wrapper is recent).
uint64_t Thread::get_current_thread_id() {
return static_cast<uint64_t>(syscall(SYS_gettid));
}
// Restricts the thread to the CPUs whose bits are set in affinityMask
// (bit i == logical CPU i; only the first 64 CPUs are addressable).
void Thread::set_cpu_affinity(uint64_t affinityMask) {
	cpu_set_t cpuSet{};
	CPU_ZERO(&cpuSet);
	// BUGFIX: the loop bound used to be CPU_COUNT(&cpuSet), which is 0 for a
	// freshly zeroed set, so no CPU was ever added and the call was a no-op.
	// Iterate over every bit of the mask instead, clamped to cpu_set_t.
	for (uint64_t i = 0; i < 64 && i < CPU_SETSIZE; ++i) {
		if (affinityMask & (1ull << i)) {
			CPU_SET(i, &cpuSet);
		}
	}
	// FIXME: this returns something, check it
	pthread_setaffinity_np(static_cast<ThreadImpl*>(this)->handle, sizeof(cpuSet), &cpuSet);
}
// Masks all signals on the calling thread so they are delivered to the
// main thread instead of arbitrary workers.
void Thread::block_signals() {
sigset_t mask;
sigfillset(&mask);
pthread_sigmask(SIG_BLOCK, &mask, nullptr);
}
#else
#if defined(OPERATING_SYSTEM_WINDOWS)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
uint64_t Thread::get_current_thread_id() {
return static_cast<uint64_t>(GetCurrentThreadId());
}
#endif
// No-op stubs for platforms without affinity/signal support.
void Thread::set_cpu_affinity(uint64_t affinityMask) {
(void)affinityMask;
}
void Thread::block_signals() {
}
#endif
// Trampoline: unpack the ThreadImpl and invoke the user's entry point.
static void* thread_proc(void* userData) {
auto* thread = reinterpret_cast<ThreadImpl*>(userData);
thread->proc(thread->userData);
return nullptr;
}
// Mutex
void MutexDeletor::operator()(Mutex* pMutex) const noexcept {
if (pMutex) {
// NOTE(review): if pthread_mutex_init failed in create(), this destroys
// an uninitialized handle — consider tracking init success.
pthread_mutex_destroy(&static_cast<MutexImpl*>(pMutex)->handle);
}
std::default_delete<MutexImpl>{}(reinterpret_cast<MutexImpl*>(pMutex));
}
// Creates a default (non-recursive) mutex; null on init failure.
Memory::IntrusivePtr<Mutex> Mutex::create() {
Memory::IntrusivePtr<Mutex> res(new MutexImpl);
if (pthread_mutex_init(&static_cast<MutexImpl*>(res.get())->handle, nullptr) != 0) {
return nullptr;
}
return res;
}
void Mutex::lock() {
// FIXME: this returns something, check it
pthread_mutex_lock(&static_cast<MutexImpl*>(this)->handle);
}
// Returns true if the lock was acquired without blocking.
bool Mutex::try_lock() {
return pthread_mutex_trylock(&static_cast<MutexImpl*>(this)->handle) == 0;
}
void Mutex::unlock() {
// FIXME: this returns something, check it
pthread_mutex_unlock(&static_cast<MutexImpl*>(this)->handle);
}
// ConditionVariable
void ConditionVariableDeletor::operator()(ConditionVariable* pCVar) const noexcept {
if (pCVar) {
pthread_cond_destroy(&static_cast<ConditionVariableImpl*>(pCVar)->handle);
}
std::default_delete<ConditionVariableImpl>{}(reinterpret_cast<ConditionVariableImpl*>(pCVar));
}
Memory::IntrusivePtr<ConditionVariable> ConditionVariable::create() {
Memory::IntrusivePtr<ConditionVariable> res(new ConditionVariableImpl);
if (pthread_cond_init(&static_cast<ConditionVariableImpl*>(res.get())->handle, nullptr) != 0) {
return nullptr;
}
return res;
}
// Precondition: `mutex` is held by the caller (pthread_cond_wait contract).
void ConditionVariable::wait(Mutex& mutex) {
// FIXME: this returns something, check it
pthread_cond_wait(&static_cast<ConditionVariableImpl*>(this)->handle,
&static_cast<MutexImpl&>(mutex).handle);
}
void ConditionVariable::notify_one() {
// FIXME: this returns something, check it
pthread_cond_signal(&static_cast<ConditionVariableImpl*>(this)->handle);
}
void ConditionVariable::notify_all() {
// FIXME: this returns something, check it
pthread_cond_broadcast(&static_cast<ConditionVariableImpl*>(this)->handle);
}
|
whupdup/frame
|
real/core/threading_pthread.cpp
|
C++
|
gpl-3.0
| 4,249
|
#include "threading.hpp"
#define WIN32_LEAN_AND_MEAN
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0600 // Vista
#include <windows.h>
#include <synchapi.h>
using namespace ZN;
using namespace ZN::OS;
// Win32 implementation structs; accessed via static_cast from the portable
// base classes declared in threading.hpp.
namespace ZN::OS {
struct ThreadImpl final : public Thread {
HANDLE handle;
DWORD threadID;
ThreadProc* proc;
void* userData;
};
struct MutexImpl final : public Mutex {
SRWLOCK handle;
};
struct ConditionVariableImpl final : public ConditionVariable {
CONDITION_VARIABLE handle;
};
}
// Trampoline matching CreateThread's signature; unpacks ThreadImpl.
static DWORD WINAPI thread_proc(LPVOID userData);
// Thread
// Joins before closing the handle so a dropped last reference never leaves
// a running thread with a dangling ThreadImpl.
void ThreadDeletor::operator()(Thread* pThread) const noexcept {
if (pThread) {
pThread->join();
CloseHandle(static_cast<ThreadImpl*>(pThread)->handle);
}
std::default_delete<ThreadImpl>{}(reinterpret_cast<ThreadImpl*>(pThread));
}
// Starts a new Win32 thread running proc(userData); returns null on failure.
Memory::IntrusivePtr<Thread> Thread::create(ThreadProc* proc, void* userData) {
	Memory::IntrusivePtr<Thread> res(new ThreadImpl);
	auto& thread = *static_cast<ThreadImpl*>(res.get());
	thread.proc = proc;
	thread.userData = userData;
	thread.handle = CreateThread(nullptr, 0, thread_proc, &thread, 0, &thread.threadID);
	// BUGFIX: CreateThread() returns NULL on failure, not INVALID_HANDLE_VALUE
	// (that sentinel belongs to CreateFile and friends), so the old check
	// never detected a failed creation.
	// NOTE(review): on failure `res` is destroyed and the deletor will
	// join/close a null handle — consider guarding the deletor too.
	if (thread.handle == nullptr) {
		return nullptr;
	}
	return res;
}
// NOTE(review): Sleep takes a DWORD, so millis above ~49.7 days truncates.
void Thread::sleep(uint64_t millis) {
Sleep(static_cast<DWORD>(millis));
}
uint64_t Thread::get_current_thread_id() {
return static_cast<uint64_t>(GetCurrentThreadId());
}
void Thread::join() {
// FIXME: this returns an error code, wrap in a WIN_CHECK or similar
WaitForSingleObject(static_cast<ThreadImpl*>(this)->handle, INFINITE);
}
void Thread::set_cpu_affinity(uint64_t affinityMask) {
// FIXME: this returns something, check it
SetThreadAffinityMask(static_cast<ThreadImpl*>(this)->handle, affinityMask);
}
// No signal masking on Windows.
void Thread::block_signals() {
}
static DWORD WINAPI thread_proc(LPVOID userData) {
auto* thread = reinterpret_cast<ThreadImpl*>(userData);
thread->proc(thread->userData);
return 0;
}
// Mutex
// SRW locks need no explicit destruction.
void MutexDeletor::operator()(Mutex* pMutex) const noexcept {
std::default_delete<MutexImpl>{}(reinterpret_cast<MutexImpl*>(pMutex));
}
Memory::IntrusivePtr<Mutex> Mutex::create() {
Memory::IntrusivePtr<Mutex> res(new MutexImpl);
InitializeSRWLock(&static_cast<MutexImpl*>(res.get())->handle);
return res;
}
void Mutex::lock() {
AcquireSRWLockExclusive(&static_cast<MutexImpl*>(this)->handle);
}
bool Mutex::try_lock() {
return TryAcquireSRWLockExclusive(&static_cast<MutexImpl*>(this)->handle);
}
void Mutex::unlock() {
ReleaseSRWLockExclusive(&static_cast<MutexImpl*>(this)->handle);
}
// ConditionVariable
// Win32 condition variables need no explicit destruction.
void ConditionVariableDeletor::operator()(ConditionVariable* pCVar) const noexcept {
std::default_delete<ConditionVariableImpl>{}(reinterpret_cast<ConditionVariableImpl*>(pCVar));
}
Memory::IntrusivePtr<ConditionVariable> ConditionVariable::create() {
Memory::IntrusivePtr<ConditionVariable> res(new ConditionVariableImpl);
InitializeConditionVariable(&static_cast<ConditionVariableImpl*>(res.get())->handle);
return res;
}
// Precondition: `mutex` is held exclusively by the caller.
void ConditionVariable::wait(Mutex& mutex) {
SleepConditionVariableSRW(&static_cast<ConditionVariableImpl*>(this)->handle,
&static_cast<MutexImpl&>(mutex).handle, INFINITE, 0);
}
void ConditionVariable::notify_one() {
WakeConditionVariable(&static_cast<ConditionVariableImpl*>(this)->handle);
}
void ConditionVariable::notify_all() {
WakeAllConditionVariable(&static_cast<ConditionVariableImpl*>(this)->handle);
}
|
whupdup/frame
|
real/core/threading_win32.cpp
|
C++
|
gpl-3.0
| 3,432
|
#pragma once
#include <core/algorithms.hpp>
#include <core/common.hpp>
#include <cassert>
#include <new>
#include <type_traits>
#include <utility>
namespace ZN {
// Fixed-alternative tagged union (a lightweight std::variant). Storage is a
// raw aligned byte buffer plus a one-byte type index; INVALID_INDEX marks
// the empty state. Dispatch (copy/move/destroy/visit) is done by recursive
// templates that compare the runtime index against each alternative.
template <typename... Types>
class Variant {
public:
static_assert(sizeof...(Types) < 255, "Cannot have more than 255 types in one variant");
using index_type = uint8_t;
// One-past-the-last alternative; also the "holds nothing" sentinel.
static constexpr const index_type INVALID_INDEX = sizeof...(Types);
// Compile-time index of T in Types..., or INVALID_INDEX if absent.
template <typename T>
static constexpr index_type index_of() {
return index_of_internal<T, 0, Types...>();
}
template <typename T>
static constexpr bool can_contain() {
return index_of<T>() != INVALID_INDEX;
}
// Default-constructed variant holds no alternative.
constexpr Variant() noexcept = default;
template <typename T, typename StrippedT = std::remove_cv_t<T>>
ZN_FORCEINLINE Variant(T&& value) requires(can_contain<StrippedT>()) {
set(std::forward<StrippedT>(value));
}
template <typename T, typename StrippedT = std::remove_cv_t<T>>
ZN_FORCEINLINE Variant(const T& value) requires(can_contain<StrippedT>()) {
set(value);
}
// Destructor: trivial when all alternatives are trivially destructible,
// otherwise dispatch to the live alternative's destructor.
~Variant() requires(!(std::is_destructible_v<Types> && ...)) = delete;
~Variant() requires(std::is_trivially_destructible_v<Types> && ...) = default;
ZN_FORCEINLINE ~Variant() requires(!(std::is_trivially_destructible_v<Types> && ...)) {
destroy_internal<0, Types...>();
}
Variant(Variant&&) requires(!(std::is_move_constructible_v<Types> && ...)) = delete;
Variant(Variant&&) noexcept = default;
ZN_FORCEINLINE Variant(Variant&& other) noexcept
requires(!(std::is_trivially_move_constructible_v<Types> && ...)) {
if (this != &other) {
move_internal<0, Types...>(other.m_index, other.m_data, m_data);
m_index = other.m_index;
// Moved-from variant is left empty.
other.m_index = INVALID_INDEX;
}
}
void operator=(Variant&&) requires(!(std::is_move_constructible_v<Types> && ...)
|| !(std::is_destructible_v<Types> && ...)) = delete;
Variant& operator=(Variant&&) noexcept = default;
ZN_FORCEINLINE Variant& operator=(Variant&& other) noexcept
requires(!(std::is_trivially_move_constructible_v<Types> && ...)
|| !(std::is_trivially_destructible_v<Types> && ...)) {
if (this != &other) {
if constexpr (!(std::is_trivially_destructible_v<Types> && ...)) {
destroy_internal<0, Types...>();
}
move_internal<0, Types...>(other.m_index, other.m_data, m_data);
m_index = other.m_index;
other.m_index = INVALID_INDEX;
}
return *this;
}
Variant(const Variant&) requires(!(std::is_copy_constructible_v<Types> && ...)) = delete;
Variant(const Variant&) = default;
ZN_FORCEINLINE Variant(const Variant& other)
requires(!(std::is_trivially_copy_constructible_v<Types> && ...)) {
copy_internal<0, Types...>(other.m_index, other.m_data, m_data);
m_index = other.m_index;
}
ZN_FORCEINLINE Variant& operator=(const Variant& other)
requires(!(std::is_trivially_copy_constructible_v<Types> && ...)
|| !(std::is_trivially_destructible_v<Types> && ...)) {
if (this != &other) {
if constexpr (!(std::is_trivially_destructible_v<Types> && ...)) {
destroy_internal<0, Types...>();
}
copy_internal<0, Types...>(other.m_index, other.m_data, m_data);
m_index = other.m_index;
}
return *this;
}
template <typename T, typename StrippedT = std::remove_cv_t<T>>
ZN_FORCEINLINE Variant& operator=(T&& value) noexcept requires(can_contain<StrippedT>()) {
set(std::forward<T>(value));
return *this;
}
template <typename T, typename StrippedT = std::remove_cv_t<T>>
ZN_FORCEINLINE Variant& operator=(const T& value) requires(can_contain<StrippedT>()) {
set(value);
return *this;
}
// Replaces the current alternative (destroying it if needed) with a new
// value of StrippedT, constructed in the internal buffer.
template <typename T, typename StrippedT = std::remove_cv_t<T>>
void set(T&& v) requires(can_contain<StrippedT>()
&& requires { StrippedT(std::forward<T>(v)); }) {
if (m_index != INVALID_INDEX) {
if constexpr (!(std::is_trivially_destructible_v<Types> && ...)) {
destroy_internal<0, Types...>();
}
}
m_index = index_of<StrippedT>();
new (m_data) StrippedT(std::forward<T>(v));
}
template <typename T, typename StrippedT = std::remove_cv_t<T>>
void set(const T& v) requires(can_contain<StrippedT>()
&& requires { StrippedT(v); }) {
if (m_index != INVALID_INDEX) {
if constexpr (!(std::is_trivially_destructible_v<Types> && ...)) {
destroy_internal<0, Types...>();
}
}
m_index = index_of<StrippedT>();
new (m_data) StrippedT(v);
}
// Unchecked in release builds: asserts that T is the live alternative.
template <typename T>
T& get() requires(can_contain<T>()) {
assert(has<T>() && "Invalid type attempted to be retrieved");
return *reinterpret_cast<T*>(m_data);
}
template <typename T>
const T& get() const requires(can_contain<T>()) {
return const_cast<Variant*>(this)->get<T>();
}
// NOTE(review): implicit conversion to any alternative — asserts (or UB in
// release) when T is not the live alternative; consider making explicit.
template <typename T>
operator T() const requires(can_contain<T>()) {
return get<T>();
}
// True if T is the currently held alternative.
template <typename T>
constexpr bool has() const {
return m_index == index_of<T>();
}
// Invokes visitor with the live alternative; no-op when empty.
template <typename Visitor>
ZN_FORCEINLINE void visit(Visitor&& visitor) {
visit_internal<Visitor, 0, Types...>(std::forward<Visitor>(visitor));
}
private:
// Buffer sized/aligned for the largest/most-aligned alternative.
static constexpr const size_t DATA_SIZE = max<size_t>({sizeof(Types)...});
static constexpr const size_t DATA_ALIGN = max<size_t>({alignof(Types)...});
alignas(DATA_ALIGN) uint8_t m_data[DATA_SIZE] = {};
index_type m_index = INVALID_INDEX;
// Linear search over the pack; ends at sizeof...(Types) == INVALID_INDEX
// when T is not an alternative.
template <typename T, index_type InitialIndex, typename T0, typename... TOthers>
static consteval index_type index_of_internal() {
if constexpr (std::is_same_v<T, T0>) {
return InitialIndex;
}
else if constexpr (sizeof...(TOthers) > 0) {
return index_of_internal<T, InitialIndex + 1, TOthers...>();
}
else {
return InitialIndex + 1;
}
}
template <typename Visitor, index_type CheckIndex, typename T0, typename... TOthers>
ZN_FORCEINLINE constexpr void visit_internal(Visitor&& visitor) {
if (m_index == CheckIndex) {
visitor(*reinterpret_cast<T0*>(m_data));
}
else if constexpr (sizeof...(TOthers) > 0) {
visit_internal<Visitor, CheckIndex + 1, TOthers...>(
std::forward<Visitor>(visitor));
}
}
// Destroys the live alternative (if any) and marks the variant empty.
template <index_type CheckIndex, typename T0, typename... TOthers>
ZN_FORCEINLINE constexpr void destroy_internal() {
if (m_index == CheckIndex) {
reinterpret_cast<T0*>(m_data)->~T0();
m_index = INVALID_INDEX;
}
else if constexpr (sizeof...(TOthers) > 0) {
destroy_internal<CheckIndex + 1, TOthers...>();
}
}
// Move-constructs newData from oldData for the alternative at oldIndex.
template <index_type CheckIndex, typename T0, typename... TOthers>
ZN_FORCEINLINE static constexpr void move_internal(index_type oldIndex, void* oldData,
void* newData) {
if (oldIndex == CheckIndex) {
new (newData) T0(std::move(*reinterpret_cast<T0*>(oldData)));
}
else if constexpr (sizeof...(TOthers) > 0) {
move_internal<CheckIndex + 1, TOthers...>(oldIndex, oldData, newData);
}
}
// Copy-constructs newData from oldData for the alternative at oldIndex.
template <index_type CheckIndex, typename T0, typename... TOthers>
ZN_FORCEINLINE static constexpr void copy_internal(index_type oldIndex,
const void* oldData, void* newData) {
if (oldIndex == CheckIndex) {
new (newData) T0(*reinterpret_cast<const T0*>(oldData));
}
else if constexpr (sizeof...(TOthers) > 0) {
copy_internal<CheckIndex + 1, TOthers...>(oldIndex, oldData, newData);
}
}
};
}
|
whupdup/frame
|
real/core/variant.hpp
|
C++
|
gpl-3.0
| 7,299
|
# File-system layer sources for the common library.
target_sources(LibCommon PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/file_system.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/os_file.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/os_file_system.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/passthrough_file_system.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/path_utils.cpp"
)
|
whupdup/frame
|
real/file/CMakeLists.txt
|
Text
|
gpl-3.0
| 281
|
#pragma once
#include <core/base_stream.hpp>
namespace ZN {
// File abstractions used by the virtual file system: concrete backends
// (e.g. OSInputFile/OSOutputFile) implement the stream interfaces.
class InputFile : public InputStream {};
class OutputFile : public OutputStream {};
}
|
whupdup/frame
|
real/file/file.hpp
|
C++
|
gpl-3.0
| 152
|
#include "file_system.hpp"
#include <file/file.hpp>
#include <file/os_file_system.hpp>
#include <file/passthrough_file_system.hpp>
#include <file/path_utils.hpp>
using namespace ZN;
static std::unique_ptr<OSFileSystem> create_resource_directory_backend(
const std::string_view& execFolder, const std::string_view& targetName);
// Wires up the default scheme -> backend mapping, all rooted relative to
// the executable's directory. "file" is the fallback scheme used when a
// path has no scheme (see get_backend()).
FileSystem::FileSystem() {
auto execPath = PathUtils::get_executable_path();
auto execFolder = PathUtils::get_directory(execPath);
auto fileFS = std::make_unique<OSFileSystem>(execFolder);
{
// Ensure the asset-cache directory exists next to the executable.
FileStats stats{};
if (!fileFS->get_file_stats("rbxassetcache", stats)
|| stats.type != PathType::DIRECTORY) {
fileFS->create_directory("rbxassetcache");
}
}
add_backend("rbxassetcache", std::make_unique<PassthroughFileSystem>(*fileFS,
"rbxassetcache"));
add_backend("file", std::move(fileFS));
// Resource directories are located by walking up from the executable
// folder (see create_resource_directory_backend below).
add_backend("res", create_resource_directory_backend(execFolder, "res"));
add_backend("maps", create_resource_directory_backend(execFolder, "maps"));
add_backend("shaders", create_resource_directory_backend(execFolder, "shaders"));
//add_backend("rbxassetid", std::make_unique<RBXAssetCache>());
add_backend("rbxasset", create_resource_directory_backend(execFolder, "rbxasset"));
}
// Registers (or replaces) the backend serving `scheme`.
void FileSystem::add_backend(const std::string_view& scheme,
std::unique_ptr<FileSystemBackend> backend) {
backend->set_scheme(scheme);
m_backends[std::string(scheme)] = std::move(backend);
}
// Recursive directory listing; empty result when the scheme is unknown.
std::vector<Path> FileSystem::walk(const std::string_view& path) {
auto [scheme, pathName] = PathUtils::split_scheme(path);
FileSystemBackend* backend = get_backend(scheme);
if (!backend) {
return {};
}
return backend->walk(pathName);
}
// Non-recursive listing of a single directory.
std::vector<Path> FileSystem::list(const std::string_view& path) {
auto [scheme, pathName] = PathUtils::split_scheme(path);
FileSystemBackend* backend = get_backend(scheme);
if (!backend) {
return {};
}
return backend->list(pathName);
}
// Fills `fileStats`; false when the scheme is unknown or the path is absent.
bool FileSystem::get_file_stats(const std::string_view& path, FileStats& fileStats) {
auto [scheme, pathName] = PathUtils::split_scheme(path);
FileSystemBackend* backend = get_backend(scheme);
if (!backend) {
return false;
}
return backend->get_file_stats(pathName, fileStats);
}
bool FileSystem::exists(const std::string_view& path) {
auto [scheme, pathName] = PathUtils::split_scheme(path);
FileSystemBackend* backend = get_backend(scheme);
if (!backend) {
return false;
}
return backend->exists(pathName);
}
// Opens for reading; null when the scheme is unknown or the open fails.
std::unique_ptr<InputFile> FileSystem::open_file_read(const std::string_view& path) {
auto [scheme, pathName] = PathUtils::split_scheme(path);
FileSystemBackend* backend = get_backend(scheme);
if (!backend) {
return {};
}
return backend->open_file_read(pathName);
}
std::unique_ptr<OutputFile> FileSystem::open_file_write(const std::string_view& path) {
auto [scheme, pathName] = PathUtils::split_scheme(path);
FileSystemBackend* backend = get_backend(scheme);
if (!backend) {
return {};
}
return backend->open_file_write(pathName);
}
bool FileSystem::create_directory(const std::string_view& path) {
auto [scheme, pathName] = PathUtils::split_scheme(path);
FileSystemBackend* backend = get_backend(scheme);
if (!backend) {
// value-initialized bool == false
return {};
}
return backend->create_directory(pathName);
}
// Loads the whole file into memory; empty vector on failure.
// NOTE(review): a short read is not detected — res keeps its full size even
// if read() returned fewer bytes.
std::vector<char> FileSystem::file_read_bytes(const std::string_view& path) {
auto file = open_file_read(path);
if (!file) {
return {};
}
size_t size = file->get_size();
std::vector<char> res;
res.resize(size);
file->read(res.data(), size);
return res;
}
// Translates a virtual path to a native OS path ("" when unsupported).
std::string FileSystem::get_file_system_path(const std::string_view& path) {
auto [scheme, pathName] = PathUtils::split_scheme(path);
FileSystemBackend* backend = get_backend(scheme);
if (!backend) {
return "";
}
return backend->get_file_system_path(pathName);
}
// Scheme-less paths fall back to the "file" backend.
FileSystemBackend* FileSystem::get_backend(const std::string_view& scheme) {
std::unordered_map<std::string, std::unique_ptr<FileSystemBackend>>::iterator it;
if (scheme.empty()) {
it = m_backends.find("file");
}
else {
it = m_backends.find(std::string(scheme));
}
if (it != m_backends.end()) {
return it->second.get();
}
return nullptr;
}
// FileSystemBackend
// Default existence check: a path exists if stats can be retrieved for it.
bool FileSystemBackend::exists(const std::string_view& path) {
FileStats stats{};
return get_file_stats(path, stats);
}
// Iterative depth-first traversal: returns every non-directory entry under
// `path`. Directories found by list() are pushed onto an explicit stack and
// expanded in turn; their names are assumed to be backend-relative paths
// suitable for re-passing to list().
std::vector<Path> FileSystemBackend::walk(const std::string_view& path) {
	std::vector<std::vector<Path>> entryStack;
	std::vector<Path> result;
	entryStack.push_back(list(path));
	while (!entryStack.empty()) {
		// Move the top listing off the stack — the previous code copied the
		// whole vector (and every Path string in it) each iteration.
		auto entries = std::move(entryStack.back());
		entryStack.pop_back();
		for (auto& entry : entries) {
			switch (entry.type) {
			case PathType::DIRECTORY:
				entryStack.push_back(list(entry.name));
				break;
			default:
				result.push_back(std::move(entry));
			}
		}
	}
	return result;
}
// Backends without a native path representation return "".
std::string FileSystemBackend::get_file_system_path(const std::string_view&) {
return "";
}
void FileSystemBackend::set_scheme(const std::string_view& scheme) {
m_scheme = std::string(scheme);
}
// Searches for a directory named `targetName` by walking up the parent
// chain starting at the executable folder; falls back to a (possibly
// nonexistent) path next to the executable when nothing is found.
// NOTE(review): parentDir is a string_view re-derived from itself via
// PathUtils::get_directory — confirm the view never outlives its storage.
static std::unique_ptr<OSFileSystem> create_resource_directory_backend(
const std::string_view& execFolder, const std::string_view& targetName) {
std::string_view parentDir = execFolder;
for (;;) {
auto pathToTest = PathUtils::join(parentDir, targetName);
if (OSFileSystem::global_file_exists(pathToTest.c_str())) {
return std::make_unique<OSFileSystem>(pathToTest);
}
auto lastParentDir = parentDir;
parentDir = PathUtils::get_directory(parentDir);
// Stop at the filesystem root (get_directory() returns its input or "").
if (parentDir.compare(lastParentDir) == 0 || parentDir.empty()) {
break;
}
}
return std::make_unique<OSFileSystem>(PathUtils::join(execFolder, targetName));
}
|
whupdup/frame
|
real/file/file_system.cpp
|
C++
|
gpl-3.0
| 5,660
|
#pragma once
#include <core/common.hpp>
#include <core/local.hpp>
#include <core/memory.hpp>
#include <unordered_map>
#include <string>
#include <string_view>
#include <vector>
namespace ZN {
// Kind of entry a path refers to.
enum class PathType : uint8_t {
FILE,
DIRECTORY,
SPECIAL
};
// A directory entry as returned by list()/walk().
struct Path {
std::string name;
PathType type;
};
struct FileStats {
size_t size;
PathType type;
uint64_t lastModified;
};
class InputFile;
class OutputFile;
class FileSystemBackend;
// Scheme-based virtual file system: paths of the form "scheme:path" are
// dispatched to the backend registered for "scheme" (see file_system.cpp).
class FileSystem final {
public:
explicit FileSystem();
NULL_COPY_AND_ASSIGN(FileSystem);
void add_backend(const std::string_view& scheme,
std::unique_ptr<FileSystemBackend> backend);
std::vector<Path> walk(const std::string_view& path);
std::vector<Path> list(const std::string_view& path);
bool get_file_stats(const std::string_view& path, FileStats& stats);
bool exists(const std::string_view& path);
std::unique_ptr<InputFile> open_file_read(const std::string_view& path);
std::unique_ptr<OutputFile> open_file_write(const std::string_view& path);
bool create_directory(const std::string_view& path);
std::vector<char> file_read_bytes(const std::string_view& path);
std::string get_file_system_path(const std::string_view& path);
FileSystemBackend* get_backend(const std::string_view& scheme);
private:
std::unordered_map<std::string, std::unique_ptr<FileSystemBackend>> m_backends;
};
// Interface implemented by each scheme's storage provider; paths passed to
// a backend are scheme-relative (the scheme prefix already stripped).
class FileSystemBackend {
public:
virtual ~FileSystemBackend() = default;
// Recursive listing built on top of list(); defined in file_system.cpp.
std::vector<Path> walk(const std::string_view& path);
virtual std::vector<Path> list(const std::string_view& path) = 0;
virtual bool get_file_stats(const std::string_view& path, FileStats& stats) = 0;
virtual std::string get_file_system_path(const std::string_view& path) = 0;
virtual bool exists(const std::string_view& path);
virtual std::unique_ptr<InputFile> open_file_read(const std::string_view& path) = 0;
virtual std::unique_ptr<OutputFile> open_file_write(const std::string_view& path) = 0;
virtual bool create_directory(const std::string_view& path) = 0;
void set_scheme(const std::string_view& scheme);
private:
std::string m_scheme;
};
// Global file-system instance.
inline Local<FileSystem> g_fileSystem;
}
|
whupdup/frame
|
real/file/file_system.hpp
|
C++
|
gpl-3.0
| 2,179
|
#include "os_file.hpp"
#include <cassert>
#include <cstdio>
using namespace ZN;
#if defined(OPERATING_SYSTEM_WINDOWS)
static FILE* open_file(const char* path, const char* mode) {
FILE* result;
if (fopen_s(&result, path, mode) == 0) {
return result;
}
return nullptr;
}
#else
#define open_file fopen
#endif
// OSInputFile
// Starts closed; open() must succeed before reading.
OSInputFile::OSInputFile()
	: m_handle(nullptr)
	, m_hasNext(false) {}
// Opens `path` for binary reading. Returns true on success.
// m_hasNext now tracks whether the open actually succeeded; previously it
// was unconditionally set to true, so a failed open still reported
// has_next() == true and invited reads on a null handle.
bool OSInputFile::open(const char* path) {
	m_handle = open_file(path, "rb");
	m_hasNext = is_open();
	return is_open();
}
// Closes the underlying handle if open; safe to call repeatedly.
void OSInputFile::close() {
	if (is_open()) {
		fclose(static_cast<FILE*>(m_handle));
		m_handle = nullptr;
		m_hasNext = false;
	}
}
bool OSInputFile::is_open() const {
	return m_handle != nullptr;
}
// Reads one byte (as int); returns EOF at end of stream. Once EOF is seen,
// m_hasNext latches to false.
int OSInputFile::get() {
	auto res = fgetc(static_cast<FILE*>(m_handle));
	m_hasNext = m_hasNext && (res != EOF);
	return res;
}
// Reads up to `size` bytes into `buffer`; returns the byte count actually
// read. A short read marks the stream as exhausted.
size_t OSInputFile::read(void* buffer, size_t size) {
	assert(is_open());
	auto numRead = fread(buffer, 1, size, static_cast<FILE*>(m_handle));
	m_hasNext = m_hasNext && numRead == size;
	return numRead;
}
// OS streams are not memory-backed, so no direct buffer view exists.
const void* OSInputFile::get_buffer() const {
	return nullptr;
}
// Total file size in bytes, determined by seeking to the end and back;
// the current read position is preserved.
size_t OSInputFile::get_size() const {
	assert(is_open());
	long pos = ftell(static_cast<FILE*>(m_handle));
	fseek(static_cast<FILE*>(m_handle), 0, SEEK_END);
	size_t size = static_cast<size_t>(ftell(static_cast<FILE*>(m_handle)));
	fseek(static_cast<FILE*>(m_handle), pos, SEEK_SET);
	return size;
}
bool OSInputFile::has_next() const {
	return m_hasNext;
}
OSInputFile::~OSInputFile() {
	close();
}
// OSOutputFile
// Starts closed.
OSOutputFile::OSOutputFile()
	: m_handle(nullptr) {}
// Opens (and truncates) `path` for binary writing; returns true on success.
bool OSOutputFile::open(const char* path) {
	m_handle = open_file(path, "wb");
	return is_open();
}
// Closes the handle if open; safe to call repeatedly.
void OSOutputFile::close() {
	if (is_open()) {
		fclose(static_cast<FILE*>(m_handle));
		m_handle = nullptr;
	}
}
bool OSOutputFile::is_open() const {
	return m_handle != nullptr;
}
// Writes `size` bytes from `buffer`; returns the number of bytes written.
// fwrite is called with element size 1 so the return value is a byte
// count, mirroring OSInputFile::read. The previous argument order
// (size, 1) made fwrite return an item count of 0 or 1, so callers
// comparing the result against `size` always saw a short write.
size_t OSOutputFile::write(const void* buffer, size_t size) {
	assert(is_open());
	return fwrite(buffer, 1, size, static_cast<FILE*>(m_handle));
}
// Flushes buffered writes to the OS.
void OSOutputFile::flush() {
	assert(is_open());
	fflush(static_cast<FILE*>(m_handle));
}
OSOutputFile::~OSOutputFile() {
	close();
}
|
whupdup/frame
|
real/file/os_file.cpp
|
C++
|
gpl-3.0
| 2,187
|
#pragma once
#include <core/common.hpp>
#include <file/file.hpp>
namespace ZN {
// stdio-backed input file. The handle is stored as void* to keep
// <cstdio> out of this header.
class OSInputFile final : public InputFile {
public:
	OSInputFile();
	~OSInputFile();
	NULL_COPY_AND_ASSIGN(OSInputFile);
	// Opens `path` for binary reading; returns false on failure.
	[[nodiscard]] bool open(const char* path);
	void close();
	bool is_open() const;
	int get() override;
	size_t read(void* buffer, size_t size) override;
	// Always nullptr: OS files expose no in-memory view.
	const void* get_buffer() const override;
	size_t get_size() const override;
	bool has_next() const override;
private:
	void* m_handle;  // FILE*, opaque here
	bool m_hasNext;
};
// stdio-backed output file. The handle is stored as void* to keep
// <cstdio> out of this header.
class OSOutputFile final : public OutputFile {
public:
	OSOutputFile();
	~OSOutputFile();
	NULL_COPY_AND_ASSIGN(OSOutputFile);
	// Opens (truncating) `path` for binary writing; returns false on failure.
	[[nodiscard]] bool open(const char* path);
	void close();
	bool is_open() const;
	size_t write(const void* buffer, size_t size) override;
	void flush();
	// Writes the raw in-memory bytes of a value.
	template <typename T>
	size_t write(const T& value) {
		// Pass the value's address: the previous code passed `value` itself,
		// which does not convert to const void* and failed to compile on
		// instantiation with non-pointer T.
		return write(&value, sizeof(T));
	}
private:
	void* m_handle;  // FILE*, opaque here
};
}
|
whupdup/frame
|
real/file/os_file.hpp
|
C++
|
gpl-3.0
| 961
|
#include "os_file_system.hpp"
#include <file/os_file.hpp>
#include <file/path_utils.hpp>
using namespace ZN;
// Stats `path` relative to this backend's base directory.
bool OSFileSystem::get_file_stats(const std::string_view& path, FileStats& fileStats) {
	auto joinedPath = PathUtils::join(m_base, path);
	return global_get_file_stats(joinedPath.c_str(), fileStats);
}
bool OSFileSystem::exists(const std::string_view& path) {
	auto joinedPath = PathUtils::join(m_base, path);
	return global_file_exists(joinedPath.c_str());
}
// Maps a virtual path to the concrete OS path rooted at m_base.
std::string OSFileSystem::get_file_system_path(const std::string_view& pathName) {
	return PathUtils::join(m_base, pathName);
}
std::unique_ptr<InputFile> OSFileSystem::open_file_read(const std::string_view& path) {
OSInputFile* file = new OSInputFile;
auto fsPath = get_file_system_path(path);
if (!file->open(fsPath.c_str())) {
delete file;
return nullptr;
}
return std::unique_ptr<InputFile>(file);
}
std::unique_ptr<OutputFile> OSFileSystem::open_file_write(const std::string_view& path) {
OSOutputFile* file = new OSOutputFile;
auto fsPath = get_file_system_path(path);
if (!file->open(fsPath.c_str())) {
delete file;
return nullptr;
}
return std::unique_ptr<OutputFile>(file);
}
// Creates the directory at `path` below the mount base.
bool OSFileSystem::create_directory(const std::string_view& path) {
	// The temporary joined string outlives the call expression.
	return global_create_directory(PathUtils::join(m_base, path).c_str());
}
#if defined(OPERATING_SYSTEM_WINDOWS)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
// Stats an absolute OS path using the 64-bit stat variant so files larger
// than 2 GiB report correct sizes.
bool OSFileSystem::global_get_file_stats(const char* path, FileStats& fileStats) {
	struct __stat64 buf;
	if (_stat64(path, &buf) < 0) {
		return false;
	}
	if (buf.st_mode & _S_IFREG) {
		fileStats.type = PathType::FILE;
	}
	else if (buf.st_mode & _S_IFDIR) {
		fileStats.type = PathType::DIRECTORY;
	}
	else {
		fileStats.type = PathType::SPECIAL;
	}
	fileStats.size = static_cast<size_t>(buf.st_size);
	// NOTE(review): st_mtime is whole seconds here, while the Linux path
	// stores nanoseconds -- confirm which unit FileStats::lastModified
	// is meant to hold.
	fileStats.lastModified = static_cast<uint64_t>(buf.st_mtime);
	return true;
}
bool OSFileSystem::global_file_exists(const char* path) {
	struct __stat64 buf;
	return _stat64(path, &buf) >= 0;
}
bool OSFileSystem::global_create_directory(const char* path) {
	return CreateDirectoryA(path, NULL);
}
// `base` is the OS directory all virtual paths are joined onto.
OSFileSystem::OSFileSystem(const std::string_view& base)
	: m_base(base) {}
std::vector<Path> OSFileSystem::list(const std::string_view& pathName) {
WIN32_FIND_DATAA result;
std::string joinedPath = PathUtils::join(m_base, pathName);
HANDLE handle = FindFirstFileA(joinedPath.c_str(), &result);
if (handle == INVALID_HANDLE_VALUE) {
// TODO: LOG_ERROR
return {};
}
if (result.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
joinedPath = PathUtils::join(joinedPath, "*");
CloseHandle(handle);
handle = FindFirstFileA(joinedPath.c_str(), &result);
if (handle == INVALID_HANDLE_VALUE) {
// TODO: LOG_ERROR
return {};
}
}
std::vector<Path> entries;
do {
if (strcmp(result.cFileName, ".") == 0 || strcmp(result.cFileName, "..") == 0) {
continue;
}
Path entry;
if (result.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
entry.type = PathType::DIRECTORY;
}
else if (result.dwFileAttributes
& (FILE_ATTRIBUTE_DEVICE | FILE_ATTRIBUTE_SYSTEM | FILE_ATTRIBUTE_VIRTUAL)) {
entry.type = PathType::SPECIAL;
}
else {
entry.type = PathType::FILE;
}
entry.name = PathUtils::join(pathName, result.cFileName);
entries.push_back(std::move(entry));
}
while (FindNextFileA(handle, &result));
CloseHandle(handle);
return entries;
}
#elif defined(OPERATING_SYSTEM_LINUX)
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <dirent.h>
// `base` is the OS directory all virtual paths are joined onto.
OSFileSystem::OSFileSystem(const std::string_view& base)
	: m_base(base) {}
// Lists directory entries via readdir. dirent::d_type is used when the
// file system provides it; DT_UNKNOWN and DT_LNK fall back to a
// stat-based lookup. "." and ".." are skipped; entry names are joined
// onto the caller's virtual pathName.
std::vector<Path> OSFileSystem::list(const std::string_view& pathName) {
	std::string joinedPath = PathUtils::join(m_base, pathName);
	DIR* dir = opendir(joinedPath.c_str());
	if (!dir) {
		// TODO: LOG_ERROR failed to open directory
		return {};
	}
	std::vector<Path> entries;
	struct dirent* result;
	while ((result = readdir(dir))) {
		if (strcmp(result->d_name, ".") == 0 || strcmp(result->d_name, "..") == 0) {
			continue;
		}
		Path entry;
		entry.name = PathUtils::join(pathName, result->d_name);
		switch (result->d_type) {
			case DT_DIR:
				entry.type = PathType::DIRECTORY;
				break;
			case DT_REG:
				entry.type = PathType::FILE;
				// break added: regular files previously fell through to the
				// stat fallback below, turning the assignment above into a
				// dead store and issuing a redundant stat per file.
				break;
			case DT_UNKNOWN:
			case DT_LNK:
			{
				FileStats s;
				if (!get_file_stats(entry.name, s)) {
					// TODO: LOG_ERROR Failed to stat file
					continue;
				}
				entry.type = s.type;
			}
			break;
			default:
				entry.type = PathType::SPECIAL;
		}
		entries.push_back(std::move(entry));
	}
	closedir(dir);
	return entries;
}
// Stats `pathName` relative to the mount base.
bool OSFileSystem::get_file_stats(const std::string_view& pathName, FileStats& stats) {
	std::string joinedPath = PathUtils::join(m_base, pathName);
	struct stat buf;
	if (stat(joinedPath.c_str(), &buf) < 0) {
		return false;
	}
	if (S_ISREG(buf.st_mode)) {
		stats.type = PathType::FILE;
	}
	else if (S_ISDIR(buf.st_mode)) {
		stats.type = PathType::DIRECTORY;
	}
	else {
		stats.type = PathType::SPECIAL;
	}
	stats.size = static_cast<size_t>(buf.st_size);
	// Nanosecond-resolution mtime. NOTE(review): the Windows path stores
	// seconds -- confirm which unit callers of lastModified expect.
	stats.lastModified = buf.st_mtim.tv_sec * 1000000000ull + buf.st_mtim.tv_nsec;
	return true;
}
#endif
|
whupdup/frame
|
real/file/os_file_system.cpp
|
C++
|
gpl-3.0
| 5,262
|
#pragma once
#include <file/file_system.hpp>
namespace ZN {
// FileSystemBackend implementation backed by the host OS file system,
// rooted at a base directory.
class OSFileSystem final : public FileSystemBackend {
public:
	// Platform helpers operating on absolute OS paths (no base join).
	static bool global_get_file_stats(const char* path, FileStats& fileStats);
	static bool global_file_exists(const char* path);
	static bool global_create_directory(const char* path);
	OSFileSystem(const std::string_view& base);
	virtual ~OSFileSystem() = default;
	std::vector<Path> list(const std::string_view& pathName) override;
	[[nodiscard]] virtual bool get_file_stats(const std::string_view& path,
			FileStats& stats) override;
	std::string get_file_system_path(const std::string_view& pathName) override;
	bool exists(const std::string_view& path) override;
	std::unique_ptr<InputFile> open_file_read(const std::string_view& path) override;
	std::unique_ptr<OutputFile> open_file_write(const std::string_view& path) override;
	bool create_directory(const std::string_view& path) override;
private:
	std::string m_base;  // OS directory virtual paths are joined onto
};
}
|
whupdup/frame
|
real/file/os_file_system.hpp
|
C++
|
gpl-3.0
| 980
|
#include "passthrough_file_system.hpp"
#include <file/file.hpp>
#include <file/path_utils.hpp>
using namespace ZN;
// Every operation is forwarded to `parent` with `base` prepended to the
// incoming path, effectively remounting a subdirectory of the parent.
PassthroughFileSystem::PassthroughFileSystem(FileSystemBackend& parent,
		const std::string_view& base)
	: m_parent(&parent)
	, m_base(base) {}
std::vector<Path> PassthroughFileSystem::list(const std::string_view& pathName) {
	return m_parent->list(PathUtils::join(m_base, pathName));
}
bool PassthroughFileSystem::get_file_stats(const std::string_view& path, FileStats& stats) {
	return m_parent->get_file_stats(PathUtils::join(m_base, path), stats);
}
std::string PassthroughFileSystem::get_file_system_path(const std::string_view& pathName) {
	return m_parent->get_file_system_path(PathUtils::join(m_base, pathName));
}
std::unique_ptr<InputFile> PassthroughFileSystem::open_file_read(const std::string_view& path) {
	return m_parent->open_file_read(PathUtils::join(m_base, path));
}
std::unique_ptr<OutputFile> PassthroughFileSystem::open_file_write(const std::string_view& path) {
	return m_parent->open_file_write(PathUtils::join(m_base, path));
}
bool PassthroughFileSystem::create_directory(const std::string_view& path) {
	return m_parent->create_directory(PathUtils::join(m_base, path));
}
|
whupdup/frame
|
real/file/passthrough_file_system.cpp
|
C++
|
gpl-3.0
| 1,228
|
#pragma once
#include <file/file_system.hpp>
namespace ZN {
// Backend that remounts a subdirectory of another backend: every call is
// forwarded to `parent` with `base` prepended. The parent must outlive
// this object (held by raw pointer).
// NOTE(review): exists() is not overridden here -- it relies on the base
// class default, which presumably routes through the overridden
// get_file_stats(); confirm the base path is applied for exists().
class PassthroughFileSystem final : public FileSystemBackend {
public:
	explicit PassthroughFileSystem(FileSystemBackend& parent, const std::string_view& base);
	virtual ~PassthroughFileSystem() = default;
	NULL_COPY_AND_ASSIGN(PassthroughFileSystem);
	std::vector<Path> list(const std::string_view& pathName) override;
	[[nodiscard]] bool get_file_stats(const std::string_view& path, FileStats& stats) override;
	std::string get_file_system_path(const std::string_view& pathName) override;
	std::unique_ptr<InputFile> open_file_read(const std::string_view& path) override;
	std::unique_ptr<OutputFile> open_file_write(const std::string_view& path) override;
	bool create_directory(const std::string_view& path) override;
private:
	FileSystemBackend* m_parent;  // non-owning
	std::string m_base;
};
}
|
whupdup/frame
|
real/file/passthrough_file_system.hpp
|
C++
|
gpl-3.0
| 868
|
#include "path_utils.hpp"
#include <core/common.hpp>
#include <algorithm>
#include <sstream>
using namespace ZN;
#ifdef OPERATING_SYSTEM_WINDOWS
static constexpr const bool HAS_BACKSLASH = true;
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <array>
static constexpr const bool HAS_BACKSLASH = false;
static constexpr const std::array<const char*, 3> FILE_EXTENSIONS = {
"exe",
"file",
"a.out"
};
#include <unistd.h>
#ifdef OPERATING_SYSTEM_LINUX
#include <linux/limits.h>
#endif
#endif
static size_t find_last_slash(const std::string_view& str);
// Joins two path fragments, inserting a '/' separator unless `base`
// already ends with one (backslash also counts on Windows). An empty
// fragment returns the other side unchanged.
std::string PathUtils::join(const std::string_view& base,
		const std::string_view& path) {
	if (base.empty()) {
		return std::string(path);
	}
	if (path.empty()) {
		return std::string(base);
	}
	std::string result(base);
	const bool endsWithSlash = find_last_slash(base) == base.length() - 1;
	if (!endsWithSlash) {
		result += '/';
	}
	result += path;
	return result;
}
// Resolves `path` against the directory containing `base`
// (e.g. base "a/b/c.txt", path "d.txt" -> "a/b/d.txt").
std::string PathUtils::make_relative_path(const std::string_view& base,
		const std::string_view& path) {
	return PathUtils::join(PathUtils::get_directory(base), path);
}
// Removes "." components and resolves ".." against the preceding
// component. ".." at the start of a (relative) path is dropped silently;
// the result is re-joined with '/' separators.
std::string PathUtils::canonicalize_path(const std::string_view& path) {
	std::vector<std::string_view> tokens = tokenize_path(path);
	std::vector<std::string_view> result;
	for (auto& token : tokens) {
		if (token == ".") {
			continue;
		}
		if (token == "..") {
			if (!result.empty()) {
				result.pop_back();
			}
		}
		else {
			result.push_back(token);
		}
	}
	return merge_path(result);
}
// Ensures the path carries a scheme prefix; paths without "://" are given
// the default "file://" scheme. Empty input yields an empty string.
std::string PathUtils::enforce_scheme(const std::string_view& path) {
	if (path.empty()) {
		return "";
	}
	const bool hasScheme = path.find("://") != std::string_view::npos;
	if (hasScheme) {
		return std::string(path);
	}
	return std::string("file://") + std::string(path);
}
// Splits at the last slash into {directory, file name}. A path with no
// slash returns {path, ""}.
Pair<std::string_view, std::string_view> PathUtils::split(const std::string_view& path) {
	if (path.empty()) {
		return {};
	}
	size_t index = find_last_slash(path);
	if (index == std::string_view::npos) {
		return {path, ""};
	}
	return {
		path.substr(0, index),
		path.substr(index + 1)
	};
}
// Splits at "://" into {scheme, remainder}. A path without a scheme
// returns {"", path}.
Pair<std::string_view, std::string_view> PathUtils::split_scheme(const std::string_view& path) {
	if (path.empty()) {
		return {};
	}
	size_t index = path.find("://");
	if (index == std::string_view::npos) {
		return {"", path};
	}
	return {
		path.substr(0, index),
		path.substr(index + 3)
	};
}
// Everything before the last slash; the whole path when it has no slash.
std::string_view PathUtils::get_directory(const std::string_view& path) {
	if (path.empty()) {
		return path;
	}
	size_t index = find_last_slash(path);
	if (index == std::string::npos) {
		return path;
	}
	return path.substr(0, index);
}
// Everything after the last slash; the whole path when it has no slash.
std::string_view PathUtils::get_file_name(const std::string_view& path) {
	if (path.empty()) {
		return path;
	}
	size_t index = find_last_slash(path);
	if (index == std::string::npos) {
		return path;
	}
	return path.substr(index + 1);
}
// Text after the last '.', or "" when there is no dot. Note: searches the
// whole path, not just the file name component.
std::string_view PathUtils::get_file_extension(const std::string_view& path) {
	if (path.empty()) {
		return path;
	}
	size_t index = path.find_last_of('.');
	if (index == std::string::npos) {
		return "";
	}
	return path.substr(index + 1);
}
// Splits the path on separators ('/' everywhere, plus '\\' on Windows)
// into non-empty component views into the input; repeated separators
// produce no empty tokens.
std::vector<std::string_view> PathUtils::tokenize_path(const std::string_view& path) {
	if (path.empty()) {
		return {};
	}
	std::vector<std::string_view> result;
	size_t startIndex = 0;
	size_t index;
	constexpr const char* delim = HAS_BACKSLASH ? "/\\" : "/";
	while ((index = path.find_first_of(delim, startIndex)) != std::string_view::npos) {
		if (index > startIndex) {
			result.push_back(path.substr(startIndex, index - startIndex));
		}
		startIndex = index + 1;
	}
	if (startIndex < path.length()) {
		result.push_back(path.substr(startIndex));
	}
	return result;
}
// Re-joins path components with '/' separators (no leading or trailing
// slash); an empty token list yields an empty string.
std::string PathUtils::merge_path(const std::vector<std::string_view>& tokens) {
	std::string result;
	bool first = true;
	for (const auto& token : tokens) {
		if (!first) {
			result += '/';
		}
		result += token;
		first = false;
	}
	return result;
}
// Returns the canonicalized path of the running executable.
// Windows: GetModuleFileNameA. Elsewhere: resolve the per-OS /proc self
// link (FILE_EXTENSIONS lists the candidate link names, e.g. "exe" on
// Linux); returns "" when none resolves.
std::string PathUtils::get_executable_path() {
#ifdef OPERATING_SYSTEM_WINDOWS
	char target[4096];
	DWORD ret = GetModuleFileNameA(GetModuleHandle(nullptr), target, sizeof(target));
	target[ret] = '\0';
	return canonicalize_path(target);
#else
	char linkPath[PATH_MAX];
	char target[PATH_MAX];
	for (const char* ext : FILE_EXTENSIONS) {
		std::snprintf(linkPath, sizeof(linkPath), "/proc/self/%s", ext);
		ssize_t ret = readlink(linkPath, target, sizeof(target) - 1);
		// Only return once a link actually resolved. Previously the return
		// sat outside this check, so the first iteration always returned --
		// using an uninitialized buffer whenever readlink failed.
		if (ret >= 0) {
			target[ret] = '\0';
			return canonicalize_path(target);
		}
	}
	return std::string("");
#endif
}
// Index of the last path separator, or npos. Backslash counts as a
// separator only on Windows.
static size_t find_last_slash(const std::string_view& str) {
	if constexpr (HAS_BACKSLASH) {
		return str.find_last_of("/\\");
	}
	else {
		return str.find_last_of('/');
	}
}
|
whupdup/frame
|
real/file/path_utils.cpp
|
C++
|
gpl-3.0
| 4,683
|
#pragma once
#include <core/pair.hpp>
#include <string>
#include <string_view>
#include <vector>
// String-based path helpers. All functions are pure and perform no file
// system access; string_view results alias the input and must not outlive it.
namespace ZN::PathUtils {
	// Joins `base` and `path` with a single separator between them.
	std::string join(const std::string_view& base, const std::string_view& path);
	// Resolves `path` against the directory containing `base`.
	std::string make_relative_path(const std::string_view& base, const std::string_view& path);
	// Removes "." components and resolves "..".
	std::string canonicalize_path(const std::string_view& path);
	// Prefixes "file://" when the path carries no scheme.
	std::string enforce_scheme(const std::string_view& path);
	// {directory, file name} split at the last separator.
	Pair<std::string_view, std::string_view> split(const std::string_view& path);
	// {scheme, remainder} split at "://".
	Pair<std::string_view, std::string_view> split_scheme(const std::string_view& path);
	std::string_view get_directory(const std::string_view& path);
	std::string_view get_file_name(const std::string_view& path);
	std::string_view get_file_extension(const std::string_view& path);
	// Non-empty components of the path, as views into the input.
	std::vector<std::string_view> tokenize_path(const std::string_view& path);
	// Inverse of tokenize_path: re-joins components with '/'.
	std::string merge_path(const std::vector<std::string_view>& tokens);
	// Canonicalized path of the running executable, or "" on failure.
	std::string get_executable_path();
}
|
whupdup/frame
|
real/file/path_utils.hpp
|
C++
|
gpl-3.0
| 957
|
# Translation units of the graphics module, added to the LibCommon target.
target_sources(LibCommon PRIVATE
	"${CMAKE_CURRENT_SOURCE_DIR}/barrier_info_collection.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/base_frame_graph_pass.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/buffer.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/buffer_resource_tracker.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/command_buffer.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/command_pool.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/compute_pipeline.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/dds_texture.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/descriptors.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/fence.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/frame_graph.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/frame_graph_pass.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/framebuffer.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/graphics_pipeline.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/graphics_pipeline_builder.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/image.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/image_resource_tracker.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/image_view.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/material.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/mesh_pool.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/non_owning_mesh_pool.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/owning_mesh_pool.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/pipeline_layout.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/queue.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/render_context.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/render_pass.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/sampler.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/semaphore_pool.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/shader_program.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/specialization_info.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/texture.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/texture_registry.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/upload_context.cpp"
	"${CMAKE_CURRENT_SOURCE_DIR}/vk_initializers.cpp"
)
|
whupdup/frame
|
real/graphics/CMakeLists.txt
|
Text
|
gpl-3.0
| 1,722
|
#include "barrier_info_collection.hpp"
#include <core/hash_builder.hpp>
#include <graphics/command_buffer.hpp>
using namespace ZN;
using namespace ZN::GFX;
static bool image_barrier_equals(const VkImageMemoryBarrier& a, const VkImageMemoryBarrier& b);
static bool image_subresource_range_equals(const VkImageSubresourceRange& a,
const VkImageSubresourceRange& b);
// Records an execution-only barrier (no memory barriers) for the given
// stage pair; try_emplace leaves an existing entry's barriers intact.
void BarrierInfoCollection::add_pipeline_barrier(VkPipelineStageFlags srcStageMask,
		VkPipelineStageFlags dstStageMask) {
	m_info.try_emplace(StageInfo{srcStageMask, dstStageMask, 0});
}
// Queues an image memory barrier under its (src, dst, flags) bucket so all
// barriers sharing stages can be emitted in one vkCmdPipelineBarrier.
void BarrierInfoCollection::add_image_memory_barrier(VkPipelineStageFlags srcStageMask,
		VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
		VkImageMemoryBarrier barrier) {
	m_info[StageInfo{srcStageMask, dstStageMask, dependencyFlags}].imageBarriers
			.emplace_back(std::move(barrier));
}
// Same as above for buffer memory barriers.
void BarrierInfoCollection::add_buffer_memory_barrier(VkPipelineStageFlags srcStageMask,
		VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
		VkBufferMemoryBarrier barrier) {
	m_info[StageInfo{srcStageMask, dstStageMask, dependencyFlags}].bufferBarriers
			.emplace_back(std::move(barrier));
}
// Flushes every queued bucket as one pipeline_barrier call per stage
// combination, then clears the collection for reuse.
void BarrierInfoCollection::emit_barriers(CommandBuffer& cmd) {
	for (auto& [flags, barrierInfo] : m_info) {
		cmd.pipeline_barrier(flags.srcStageMask, flags.dstStageMask, flags.dependencyFlags, 0,
				nullptr, static_cast<uint32_t>(barrierInfo.bufferBarriers.size()),
				barrierInfo.bufferBarriers.data(),
				static_cast<uint32_t>(barrierInfo.imageBarriers.size()),
				barrierInfo.imageBarriers.data());
	}
	//print_barriers();
	m_info.clear();
}
// True if an identical image barrier is already queued under the given
// stage combination (field-by-field comparison, pNext excluded).
bool BarrierInfoCollection::contains_barrier(VkPipelineStageFlags srcStageMask,
		VkPipelineStageFlags dstStageMask,
		VkDependencyFlags dependencyFlags, const VkImageMemoryBarrier& barrierIn) const {
	if (auto it = m_info.find(StageInfo{srcStageMask, dstStageMask, dependencyFlags});
			it != m_info.end()) {
		for (auto& barrier : it->second.imageBarriers) {
			if (image_barrier_equals(barrier, barrierIn)) {
				return true;
			}
		}
	}
	return false;
}
// Number of distinct stage combinations, i.e. vkCmdPipelineBarrier calls
// that emit_barriers would issue.
size_t BarrierInfoCollection::get_pipeline_barrier_count() const {
	return m_info.size();
}
// Number of image barriers queued under one stage combination.
size_t BarrierInfoCollection::get_image_memory_barrier_count(VkPipelineStageFlags srcStageMask,
		VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags) const {
	if (auto it = m_info.find(StageInfo{srcStageMask, dstStageMask, dependencyFlags});
			it != m_info.end()) {
		return it->second.imageBarriers.size();
	}
	return 0;
}
// StageInfo
// StageInfo
// Full-field equality; StageInfo is the hash-map key.
bool BarrierInfoCollection::StageInfo::operator==(const StageInfo& other) const {
	return srcStageMask == other.srcStageMask && dstStageMask == other.dstStageMask
			&& dependencyFlags == other.dependencyFlags;
}
size_t BarrierInfoCollection::StageInfo::hash() const {
	return HashBuilder{}
			.add_uint32(srcStageMask)
			.add_uint32(dstStageMask)
			.add_uint32(dependencyFlags)
			.get();
}
// StageInfoHash
size_t BarrierInfoCollection::StageInfoHash::operator()(const StageInfo& info) const {
	return info.hash();
}
// Field-by-field comparison of two image barriers (pNext is ignored).
static bool image_barrier_equals(const VkImageMemoryBarrier& a, const VkImageMemoryBarrier& b) {
	return a.srcAccessMask == b.srcAccessMask && a.dstAccessMask == b.dstAccessMask
			&& a.oldLayout == b.oldLayout && a.newLayout == b.newLayout
			&& a.srcQueueFamilyIndex == b.srcQueueFamilyIndex
			&& a.dstQueueFamilyIndex == b.dstQueueFamilyIndex
			&& a.image == b.image
			&& image_subresource_range_equals(a.subresourceRange, b.subresourceRange);
}
static bool image_subresource_range_equals(const VkImageSubresourceRange& a,
		const VkImageSubresourceRange& b) {
	return a.aspectMask == b.aspectMask && a.baseMipLevel == b.baseMipLevel
			&& a.levelCount == b.levelCount && a.baseArrayLayer == b.baseArrayLayer
			&& a.layerCount == b.layerCount;
}
|
whupdup/frame
|
real/graphics/barrier_info_collection.cpp
|
C++
|
gpl-3.0
| 3,827
|
#pragma once
#include <volk.h>
#include <unordered_map>
#include <vector>
namespace ZN::GFX {
class CommandBuffer;
// Batches Vulkan pipeline/memory barriers by (srcStage, dstStage,
// dependencyFlags) so that emit_barriers() issues at most one
// vkCmdPipelineBarrier per stage combination.
class BarrierInfoCollection final {
public:
	// Records an execution-only barrier for the stage pair.
	void add_pipeline_barrier(VkPipelineStageFlags srcStageMask,
			VkPipelineStageFlags dstStageMask);
	void add_image_memory_barrier(VkPipelineStageFlags srcStageMask,
			VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
			VkImageMemoryBarrier barrier);
	void add_buffer_memory_barrier(VkPipelineStageFlags srcStageMask,
			VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
			VkBufferMemoryBarrier barrier);
	// Flushes all queued barriers into the command buffer and clears state.
	void emit_barriers(CommandBuffer&);
	// True if an identical image barrier is already queued for the stage pair.
	bool contains_barrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
			VkDependencyFlags dependencyFlags, const VkImageMemoryBarrier& barrierIn) const;
	size_t get_pipeline_barrier_count() const;
	size_t get_image_memory_barrier_count(VkPipelineStageFlags srcStageMask,
			VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags) const;
private:
	// Barriers queued for one stage combination.
	struct BarrierInfo {
		std::vector<VkImageMemoryBarrier> imageBarriers;
		std::vector<VkBufferMemoryBarrier> bufferBarriers;
	};
	// Hash-map key: the stage combination a barrier batch is emitted under.
	struct StageInfo {
		VkPipelineStageFlags srcStageMask;
		VkPipelineStageFlags dstStageMask;
		VkDependencyFlags dependencyFlags;
		bool operator==(const StageInfo& other) const;
		size_t hash() const;
	};
	struct StageInfoHash {
		size_t operator()(const StageInfo& info) const;
	};
	std::unordered_map<StageInfo, BarrierInfo, StageInfoHash> m_info;
};
}
|
whupdup/frame
|
real/graphics/barrier_info_collection.hpp
|
C++
|
gpl-3.0
| 1,561
|
#include "graphics/base_frame_graph_pass.hpp"
using namespace ZN;
using namespace ZN::GFX;
// Invokes the registered command callback, if any (no-op otherwise).
void BaseFrameGraphPass::write_commands(CommandBuffer& cmd) {
	if (m_commandCallback) {
		m_commandCallback(cmd);
	}
}
// Registers a texture dependency of this pass, keyed by an intrusive
// reference to the image view. emplace keeps the first registration if the
// same view is added twice -- NOTE(review): confirm that is intended.
void BaseFrameGraphPass::add_texture_internal(ImageView& res, ResourceAccess::Enum access,
		VkPipelineStageFlags stageFlags, VkImageLayout layout) {
	m_textures.emplace(std::make_pair(res.reference_from_this(), TextureDependency{
		.stageFlags = stageFlags,
		.layout = layout,
		.access = access
	}));
}
// Registers a buffer range dependency of this pass (same emplace caveat).
void BaseFrameGraphPass::add_buffer_internal(Buffer& buffer, ResourceAccess::Enum access,
		VkPipelineStageFlags stageFlags, VkDeviceSize offset, VkDeviceSize size) {
	m_buffers.emplace(std::make_pair(buffer.reference_from_this(), BufferDependency{
		.stageFlags = stageFlags,
		.access = access,
		.offset = offset,
		.size = size,
	}));
}
// True if this pass registered `img` with WRITE access.
bool BaseFrameGraphPass::writes_image_internal(const Memory::IntrusivePtr<ImageView>& img) const {
	// Bitwise & (not logical &&): ResourceAccess::Enum is a bit mask, and
	// the previous `&&` made any registered access -- including read-only --
	// count as a write. Now mirrors writes_buffer_internal.
	if (auto it = m_textures.find(img); it != m_textures.end()
			&& (it->second.access & ResourceAccess::WRITE)) {
		return true;
	}
	return false;
}
// True if this pass registered `buf` with WRITE access.
bool BaseFrameGraphPass::writes_buffer_internal(const Memory::IntrusivePtr<Buffer>& buf) const {
	auto it = m_buffers.find(buf);
	return it != m_buffers.end() && (it->second.access & ResourceAccess::WRITE) != 0;
}
|
whupdup/frame
|
real/graphics/base_frame_graph_pass.cpp
|
C++
|
gpl-3.0
| 1,357
|
#pragma once
#include <graphics/buffer.hpp>
#include <graphics/image_view.hpp>
#include <functional>
#include <unordered_map>
#include <utility>
namespace ZN::GFX {
class BarrierInfoCollection;
class CommandBuffer;
// Bit flags describing how a pass touches a resource (combinable).
struct ResourceAccess {
	enum Enum : uint8_t {
		READ = 0b01,
		WRITE = 0b10,
		READ_WRITE = READ | WRITE
	};
};
// How a pass consumes a texture: pipeline stages, required layout, access.
struct TextureDependency {
	VkPipelineStageFlags stageFlags;
	VkImageLayout layout;
	ResourceAccess::Enum access;
};
// How a pass uses a framebuffer attachment, including optional clear.
struct AttachmentDependency {
	VkClearValue clearValue;
	ResourceAccess::Enum access;
	bool clear;
};
// How a pass consumes a buffer sub-range.
struct BufferDependency {
	VkPipelineStageFlags stageFlags;
	ResourceAccess::Enum access;
	VkDeviceSize offset;
	VkDeviceSize size;
};
// Shared state for frame graph passes: an optional command-recording
// callback plus the pass's texture and buffer dependency maps, keyed by
// intrusive pointer.
class BaseFrameGraphPass {
public:
	explicit BaseFrameGraphPass() = default;
	NULL_COPY_AND_ASSIGN(BaseFrameGraphPass);
	// Runs the registered command callback, if any.
	void write_commands(CommandBuffer&);
	// Calls func(imageView, TextureDependency) for every texture dependency.
	template <typename Functor>
	void for_each_texture(Functor&& func) const {
		for (auto& [imageView, r] : m_textures) {
			func(imageView, r);
		}
	}
	// Calls func(buffer, BufferDependency) for every buffer dependency.
	template <typename Functor>
	void for_each_buffer(Functor&& func) const {
		for (auto& [buffer, r] : m_buffers) {
			func(buffer, r);
		}
	}
protected:
	void add_texture_internal(ImageView& res, ResourceAccess::Enum access,
			VkPipelineStageFlags stageFlags, VkImageLayout layout);
	void add_buffer_internal(Buffer& buffer, ResourceAccess::Enum access,
			VkPipelineStageFlags stageFlags, VkDeviceSize offset, VkDeviceSize size);
	template <typename Functor>
	void add_command_callback_internal(Functor&& func) {
		// std::forward, not std::move: Functor&& is a forwarding reference,
		// and the previous unconditional move silently gutted an lvalue
		// functor passed by the caller.
		m_commandCallback = std::forward<Functor>(func);
	}
	bool writes_image_internal(const Memory::IntrusivePtr<ImageView>& img) const;
	bool writes_buffer_internal(const Memory::IntrusivePtr<Buffer>& buf) const;
private:
	std::function<void(CommandBuffer&)> m_commandCallback = {};
	std::unordered_map<Memory::IntrusivePtr<ImageView>, TextureDependency> m_textures;
	std::unordered_map<Memory::IntrusivePtr<Buffer>, BufferDependency> m_buffers;
};
// CRTP mixin giving a concrete pass type chainable builder methods that
// return Derived& for fluent configuration.
template <typename Derived>
class FrameGraphPassMixin : public BaseFrameGraphPass {
public:
	Derived& add_texture(ImageView& res, ResourceAccess::Enum access,
			VkPipelineStageFlags stageFlags, VkImageLayout layout) {
		add_texture_internal(res, access, stageFlags, layout);
		return *static_cast<Derived*>(this);
	}
	// Convenience overload covering the buffer's entire range.
	Derived& add_buffer(Buffer& buffer, ResourceAccess::Enum access,
			VkPipelineStageFlags stageFlags) {
		add_buffer_internal(buffer, access, stageFlags, 0, buffer.get_size());
		return *static_cast<Derived*>(this);
	}
	Derived& add_buffer(Buffer& buffer, ResourceAccess::Enum access,
			VkPipelineStageFlags stageFlags, VkDeviceSize offset, VkDeviceSize size) {
		add_buffer_internal(buffer, access, stageFlags, offset, size);
		return *static_cast<Derived*>(this);
	}
	template <typename Functor>
	Derived& add_command_callback(Functor&& func) {
		// std::forward, not std::move: func is a forwarding reference and an
		// unconditional move would gut a caller-owned lvalue functor.
		add_command_callback_internal(std::forward<Functor>(func));
		return *static_cast<Derived*>(this);
	}
	bool writes_image(const Memory::IntrusivePtr<ImageView>& img) const {
		return static_cast<const Derived*>(this)->writes_image_internal(img);
	}
	bool writes_buffer(const Memory::IntrusivePtr<Buffer>& buf) const {
		return static_cast<const Derived*>(this)->writes_buffer_internal(buf);
	}
private:
};
}
|
whupdup/frame
|
real/graphics/base_frame_graph_pass.hpp
|
C++
|
gpl-3.0
| 3,267
|
#include "buffer.hpp"
#include <graphics/render_context.hpp>
#include <graphics/vk_common.hpp>
#include <cassert>
#include <Tracy.hpp>
using namespace ZN;
using namespace ZN::GFX;
static int64_t g_counter = 0;
// Allocates a VkBuffer plus backing memory through VMA. Returns an empty
// pointer on allocation failure.
Memory::IntrusivePtr<Buffer> Buffer::create(VkDeviceSize size, VkBufferUsageFlags usageFlags,
		VmaMemoryUsage memoryUsage, VkMemoryPropertyFlags requiredFlags) {
	VkBufferCreateInfo createInfo{};
	createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	createInfo.size = size;
	createInfo.usage = usageFlags;
	VmaAllocationCreateInfo allocInfo{};
	allocInfo.usage = memoryUsage;
	allocInfo.requiredFlags = requiredFlags;
	VkBuffer buffer;
	VmaAllocation allocation;
	if (vmaCreateBuffer(g_renderContext->get_allocator(), &createInfo, &allocInfo, &buffer,
			&allocation, nullptr) == VK_SUCCESS) {
		// Private ctor: raw new here is the only way to hand ownership to
		// the intrusive pointer.
		return Memory::IntrusivePtr<Buffer>(new Buffer(buffer, allocation, size, usageFlags));
	}
	return {};
}
// Takes ownership of an already-created buffer/allocation pair.
// NOTE(review): g_counter is a plain int64_t updated here and in the
// destructor -- if buffers are created/destroyed off one thread this
// Tracy plot counter races; confirm the threading model.
Buffer::Buffer(VkBuffer buffer, VmaAllocation allocation, VkDeviceSize size,
		VkBufferUsageFlags usageFlags)
	: m_buffer(buffer)
	, m_allocation(allocation)
	, m_size(size)
	, m_mapping(nullptr)
	, m_usageFlags(usageFlags) {
	++g_counter;
	TracyPlot("Buffers", g_counter);
}
// Unmaps if still mapped, then defers the actual VMA destruction to the
// render context's delete queue (the GPU may still be using the buffer).
Buffer::~Buffer() {
	--g_counter;
	TracyPlot("Buffers", g_counter);
	if (m_buffer != VK_NULL_HANDLE) {
		if (m_mapping) {
			unmap();
		}
		g_renderContext->queue_delete([buffer=this->m_buffer, allocation=this->m_allocation] {
			vmaDestroyBuffer(g_renderContext->get_allocator(), buffer, allocation);
		});
	}
}
// Forwards range-state tracking to the internal BufferResourceTracker,
// which records the new usage and queues any required barriers.
void Buffer::update_subresources(BarrierInfoCollection& barrierInfo,
		const BufferResourceTracker::ResourceInfo& range,
		BufferResourceTracker::BarrierMode barrierMode, bool ignorePreviousState) {
	m_resourceTracker.update_range(m_buffer, barrierInfo, range, barrierMode, ignorePreviousState);
}
Buffer::operator VkBuffer() const {
	return m_buffer;
}
VkBuffer Buffer::get_buffer() const {
	return m_buffer;
}
VmaAllocation Buffer::get_allocation() const {
	return m_allocation;
}
VkDeviceSize Buffer::get_size() const {
	return m_size;
}
VkBufferUsageFlags Buffer::get_usage_flags() const {
	return m_usageFlags;
}
// Maps the allocation into host memory; idempotent (returns the existing
// mapping if already mapped).
uint8_t* Buffer::map() {
	if (m_mapping) {
		return m_mapping;
	}
	void* mapping{};
	VK_CHECK(vmaMapMemory(g_renderContext->get_allocator(), m_allocation, &mapping));
	m_mapping = reinterpret_cast<uint8_t*>(mapping);
	return m_mapping;
}
// Releases the host mapping; asserts if not currently mapped.
void Buffer::unmap() {
	assert(m_mapping && "Attempting to unmap already unmapped buffer");
	vmaUnmapMemory(g_renderContext->get_allocator(), m_allocation);
	m_mapping = nullptr;
}
// Flushes the whole range of host writes (needed for non-coherent memory).
void Buffer::flush() {
	vmaFlushAllocation(g_renderContext->get_allocator(), m_allocation, 0, m_size);
}
// Invalidates the whole range before host reads (non-coherent memory).
void Buffer::invalidate() {
	vmaInvalidateAllocation(g_renderContext->get_allocator(), m_allocation, 0, m_size);
}
|
whupdup/frame
|
real/graphics/buffer.cpp
|
C++
|
gpl-3.0
| 2,795
|
#pragma once
#include <core/intrusive_ptr.hpp>
#include <graphics/buffer_resource_tracker.hpp>
#include <vk_mem_alloc.h>
namespace ZN::GFX {
// Ref-counted VMA-backed Vulkan buffer with host mapping helpers and
// per-range resource state tracking. Construct via create().
class Buffer final : public Memory::ThreadSafeIntrusivePtrEnabled<Buffer> {
public:
	// Returns an empty pointer on allocation failure.
	static Memory::IntrusivePtr<Buffer> create(VkDeviceSize size, VkBufferUsageFlags,
			VmaMemoryUsage, VkMemoryPropertyFlags requiredFlags = 0);
	~Buffer();
	NULL_COPY_AND_ASSIGN(Buffer);
	// Host mapping; map() is idempotent, flush/invalidate cover the whole range.
	uint8_t* map();
	void unmap();
	void flush();
	void invalidate();
	// Records a new usage of a sub-range and queues any required barriers.
	void update_subresources(BarrierInfoCollection& barrierInfo,
			const BufferResourceTracker::ResourceInfo& range,
			BufferResourceTracker::BarrierMode barrierMode, bool ignorePreviousState);
	operator VkBuffer() const;
	VkBuffer get_buffer() const;
	VmaAllocation get_allocation() const;
	VkDeviceSize get_size() const;
	VkBufferUsageFlags get_usage_flags() const;
private:
	VkBuffer m_buffer;
	VmaAllocation m_allocation;
	VkDeviceSize m_size;
	uint8_t* m_mapping;  // non-null while mapped
	BufferResourceTracker m_resourceTracker;
	VkBufferUsageFlags m_usageFlags;
	explicit Buffer(VkBuffer, VmaAllocation, VkDeviceSize, VkBufferUsageFlags);
};
}
|
whupdup/frame
|
real/graphics/buffer.hpp
|
C++
|
gpl-3.0
| 1,144
|
#include "buffer_resource_tracker.hpp"
#include <graphics/barrier_info_collection.hpp>
#include <algorithm>
using namespace ZN;
using namespace ZN::GFX;
static bool test_range_start(const BufferResourceTracker::ResourceInfo& a,
const BufferResourceTracker::ResourceInfo& b);
static bool test_range_end(const BufferResourceTracker::ResourceInfo& a,
const BufferResourceTracker::ResourceInfo& b);
// ResourceInfo
// True when stage flags, access mask, and queue family all match; the byte
// range (offset/size) is deliberately not compared.
bool BufferResourceTracker::ResourceInfo::states_equal(const ResourceInfo& other) const {
	return stageFlags == other.stageFlags && accessMask == other.accessMask
			&& queueFamilyIndex == other.queueFamilyIndex;
}
// True when the half-open byte ranges [offset, offset+size) overlap.
bool BufferResourceTracker::ResourceInfo::intersects(const ResourceInfo& other) const {
	return offset < other.offset + other.size && offset + size > other.offset;
}
// this fully covers other
bool BufferResourceTracker::ResourceInfo::fully_covers(const ResourceInfo& other) const {
	return other.offset >= offset && other.offset + other.size <= offset + size;
}
// BufferResourceTracker
// Records that `rangeIn` is now used with the given state, emitting any
// required barriers into `barrierInfo`, then coalesces adjacent ranges that
// ended up with identical states.
void BufferResourceTracker::update_range(VkBuffer buffer, BarrierInfoCollection &barrierInfo,
		const ResourceInfo &rangeIn, BarrierMode barrierMode, bool ignorePreviousState) {
	insert_range_internal(buffer, rangeIn, barrierInfo, barrierMode, ignorePreviousState);
	union_ranges();
}
// Number of distinct tracked ranges (mainly useful for tests/debugging).
size_t BufferResourceTracker::get_range_count() const {
	return m_ranges.size();
}
// Core range-insertion routine. Walks the existing ranges backwards (indices
// may be removed via swap-and-pop during iteration, which is why the loop runs
// high-to-low) and reconciles `rangeIn` against each overlapping range:
//  - CASE 1: existing range fully covers the input with equal state (or only
//    a queue family ownership transfer is needed) -> no list change.
//  - CASE 2: input supersedes an existing range -> remove the old one.
//  - CASE 3: partial overlap -> split into intersection + remainders.
// `generateLastBarrier` controls whether a TOP_OF_PIPE "first use" barrier is
// emitted for the part of `rangeIn` not covered by any previous range.
void BufferResourceTracker::insert_range_internal(VkBuffer buffer, const ResourceInfo& rangeIn,
		BarrierInfoCollection& barrierInfo, BarrierMode barrierMode, bool ignorePreviousState,
		bool generateLastBarrier) {
	/*auto checkEnd = std::upper_bound(m_ranges.begin(), m_ranges.end(), rangeIn, test_range_end);
	auto checkStart = std::upper_bound(m_ranges.rbegin(), m_ranges.rend(), rangeIn,
			test_range_start);
	for (auto it = checkStart != m_ranges.rend() ? checkStart.base() : m_ranges.begin();
			it != checkEnd; ++it) {*/
	bool insertRangeIn = true;
	if (!m_ranges.empty()) {
		// Unsigned reverse loop: terminated by the explicit `i == 0` break.
		for (size_t i = m_ranges.size() - 1;; --i) {
			auto& range = m_ranges[i];
			auto statesEqual = range.states_equal(rangeIn);
			// QFOT = queue family ownership transfer.
			bool needsQFOT = !ignorePreviousState
					&& range.queueFamilyIndex != rangeIn.queueFamilyIndex;
			if (range.fully_covers(rangeIn) && (statesEqual || needsQFOT)) {
				// CASE 1: `rangeIn` is entirely covered by `range`, and states are equal, meaning
				// no alternations need to be made to the range list
				if (needsQFOT) {
					add_barrier(buffer, barrierInfo, range, rangeIn, range.offset, range.size,
							ignorePreviousState);
					range.queueFamilyIndex = rangeIn.queueFamilyIndex;
				}
				else if (barrierMode == BarrierMode::ALWAYS) {
					add_barrier(buffer, barrierInfo, range, rangeIn, rangeIn.offset,
							rangeIn.size, ignorePreviousState);
				}
				return;
			}
			else if (rangeIn.fully_covers(range) && (ignorePreviousState || statesEqual)) {
				// CASE 2: input range fully covers existing range, therefore remove it.
				// This case is only valid if `rangeIn` can supersede the previous value
				if (barrierMode == BarrierMode::ALWAYS) {
					add_barrier(buffer, barrierInfo, range, rangeIn, range.offset, range.size,
							ignorePreviousState);
					generateLastBarrier = false;
				}
				// Swap-and-pop removal; safe because the loop runs backwards.
				m_ranges[i] = std::move(m_ranges.back());
				m_ranges.pop_back();
			}
			else if (rangeIn.intersects(range)) {
				// CASE 3: input range partially covers existing range, generate difference
				// between the 2 ranges. If there is a barrier of interest, it will be on
				// the intersection of both
				//puts("CASE 3");
				// CASE 3a: Needs QFOT, entire source range must be transitioned
				if (needsQFOT && !rangeIn.fully_covers(range)) {
					add_barrier(buffer, barrierInfo, range, rangeIn, range.offset, range.size,
							false);
					generate_range_difference(rangeIn, range, buffer, barrierInfo, barrierMode,
							ignorePreviousState, generateLastBarrier);
					range.queueFamilyIndex = rangeIn.queueFamilyIndex;
					generateLastBarrier = false;
					insertRangeIn = false;
				}
				else {
					// Detach the existing range, then re-insert its remainder
					// and the input's remainder as fresh ranges.
					auto rangeCopy = std::move(range);
					m_ranges[i] = std::move(m_ranges.back());
					m_ranges.pop_back();
					bool needsUniqueBarriers = barrierMode == BarrierMode::ALWAYS || !statesEqual;
					if (needsUniqueBarriers && barrierMode != BarrierMode::NEVER) {
						auto oldState = make_range_intersection(rangeCopy, rangeIn);
						add_barrier(buffer, barrierInfo, oldState, rangeIn, oldState.offset,
								oldState.size, ignorePreviousState);
					}
					generate_range_difference(rangeCopy, rangeIn, buffer, barrierInfo, barrierMode,
							ignorePreviousState, false);
					if (!ignorePreviousState) {
						if (needsUniqueBarriers) {
							generate_range_difference(rangeIn, rangeCopy, buffer, barrierInfo,
									barrierMode, ignorePreviousState, generateLastBarrier);
							m_ranges.emplace_back(make_range_intersection(rangeIn, rangeCopy));
							return;
						}
						generateLastBarrier = false;
					}
				}
			}
			if (i == 0) {
				break;
			}
		}
	}
	if (insertRangeIn) {
		m_ranges.push_back(rangeIn);
	}
	if (generateLastBarrier && barrierMode != BarrierMode::NEVER) {
		// First recorded use of this region: synchronize against TOP_OF_PIPE
		// with no prior access.
		VkBufferMemoryBarrier barrier{
			.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
			.srcAccessMask = 0,
			.dstAccessMask = rangeIn.accessMask,
			.srcQueueFamilyIndex = rangeIn.queueFamilyIndex,
			.dstQueueFamilyIndex = rangeIn.queueFamilyIndex,
			.buffer = buffer,
			.offset = rangeIn.offset,
			.size = rangeIn.size
		};
		barrierInfo.add_buffer_memory_barrier(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
				rangeIn.stageFlags, 0, std::move(barrier));
	}
}
// Re-inserts the parts of `a` that lie outside `b` (the left and/or right
// remainder) as fresh ranges carrying `a`'s state. The intersection region
// itself is handled by the caller.
void BufferResourceTracker::generate_range_difference(const ResourceInfo& a, const ResourceInfo& b,
		VkBuffer buffer, BarrierInfoCollection& barrierInfo, BarrierMode barrierMode,
		bool ignorePreviousState, bool generateLastBarrier) {
	auto aMinX = a.offset;
	auto aMaxX = a.offset + a.size;
	auto bMinX = b.offset;
	auto bMaxX = b.offset + b.size;
	if (aMinX < bMinX) {
		// `a` extends to the left of `b`.
		//printf("Generate range difference: case 3 {%u, %u, %u, %u}\n", aMinX, sideMinY,
		//		bMinX, sideMaxY);
		insert_range_like(a, buffer, barrierInfo, barrierMode, ignorePreviousState,
				generateLastBarrier, aMinX, bMinX);
	}
	if (bMaxX < aMaxX) {
		// `a` extends to the right of `b`.
		//printf("Generate range difference: case 4 {%u, %u, %u, %u}\n",
		//		bMaxX, sideMinY, aMaxX, sideMaxY);
		insert_range_like(a, buffer, barrierInfo, barrierMode, ignorePreviousState,
				generateLastBarrier, bMaxX, aMaxX);
	}
}
// Convenience wrapper: inserts a range spanning [minX, maxX) that copies its
// state (stage/access/queue family) from `info`.
void BufferResourceTracker::insert_range_like(const ResourceInfo& info, VkBuffer buffer,
		BarrierInfoCollection& barrierInfo, BarrierMode barrierMode, bool ignorePreviousState,
		bool generateLastBarrier, VkDeviceSize minX, VkDeviceSize maxX) {
	insert_range_internal(buffer, create_range_like(info, minX, maxX), barrierInfo,
			barrierMode, ignorePreviousState, generateLastBarrier);
}
// Coalesces directly-adjacent ranges whose states are identical. After every
// merge both loops are restarted (the merged range may now abut yet another
// range, and swap-and-pop invalidated the indices), repeating until a full
// pass finds no union.
void BufferResourceTracker::union_ranges() {
	bool foundUnion = false;
	do {
		foundUnion = false;
		for (size_t i = 0; i < m_ranges.size(); ++i) {
			auto& a = m_ranges[i];
			for (size_t j = i + 1; j < m_ranges.size(); ++j) {
				auto& b = m_ranges[j];
				if (!a.states_equal(b)) {
					continue;
				}
				auto aMinX = a.offset;
				auto aMaxX = a.offset + a.size;
				auto bMinX = b.offset;
				auto bMaxX = b.offset + b.size;
				if (aMaxX == bMinX) {
					// `b` starts exactly where `a` ends: extend `a` rightwards.
					foundUnion = true;
					//printf("Union case 3 {%u, %u, %u, %u} U {%u, %u, %u, %u}\n",
					//		aMinX, aMinY, aMaxX, aMaxY, bMinX, bMinY, bMaxX, bMaxY);
					a.size = bMaxX - aMinX;
				}
				else if (aMinX == bMaxX) {
					// `a` starts exactly where `b` ends: extend `a` leftwards.
					foundUnion = true;
					//puts("Union case 4");
					a.offset = bMinX;
					a.size = aMaxX - bMinX;
				}
				if (foundUnion) {
					// Remove the absorbed range (swap-and-pop) and restart.
					m_ranges[j] = std::move(m_ranges.back());
					m_ranges.pop_back();
					break;
				}
			}
			if (foundUnion) {
				break;
			}
		}
	}
	while (foundUnion);
}
// Builds a ResourceInfo spanning [minX, maxX) with `info`'s state fields.
BufferResourceTracker::ResourceInfo BufferResourceTracker::create_range_like(
		const ResourceInfo& info, VkDeviceSize minX, VkDeviceSize maxX) {
	return {
		.stageFlags = info.stageFlags,
		.accessMask = info.accessMask,
		.offset = minX,
		.size = maxX - minX,
		.queueFamilyIndex = info.queueFamilyIndex
	};
}
// Emits a VkBufferMemoryBarrier transitioning [offset, offset+size) from
// `from`'s state to `to`'s state. When `ignorePreviousState` is set the
// source access mask is dropped (no prior writes need to be made available).
// Identical queue families are collapsed to VK_QUEUE_FAMILY_IGNORED so the
// barrier is not interpreted as an ownership transfer.
void BufferResourceTracker::add_barrier(VkBuffer buffer, BarrierInfoCollection& barrierInfo,
		const ResourceInfo& from, const ResourceInfo& to, VkDeviceSize offset,
		VkDeviceSize size, bool ignorePreviousState) {
	VkBufferMemoryBarrier barrier{
		.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
		.srcAccessMask = ignorePreviousState ? 0 : from.accessMask,
		.dstAccessMask = to.accessMask,
		.srcQueueFamilyIndex = from.queueFamilyIndex,
		.dstQueueFamilyIndex = to.queueFamilyIndex,
		.buffer = buffer,
		.offset = offset,
		.size = size
	};
	if (barrier.srcQueueFamilyIndex == barrier.dstQueueFamilyIndex) {
		barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
		barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	}
	barrierInfo.add_buffer_memory_barrier(from.stageFlags, to.stageFlags,
			0 /* FIXME: figure out how to pass dependency flags */, std::move(barrier));
}
// Returns the intersection of the two byte ranges, carrying `a`'s state
// fields (stage/access/queue family). Callers only invoke this for ranges
// known to intersect, so maxX >= minX holds and size never underflows.
BufferResourceTracker::ResourceInfo BufferResourceTracker::make_range_intersection(
		const ResourceInfo& a, const ResourceInfo& b) {
	// Use std::max/std::min instead of the hand-rolled ternaries; <algorithm>
	// is already included by this translation unit.
	auto minX = std::max(a.offset, b.offset);
	auto maxX = std::min(a.offset + a.size, b.offset + b.size);
	return {
		.stageFlags = a.stageFlags,
		.accessMask = a.accessMask,
		.offset = minX,
		.size = maxX - minX,
		.queueFamilyIndex = a.queueFamilyIndex
	};
}
// NOTE(review): these two comparators are only referenced by the commented-out
// binary-search block at the top of insert_range_internal(); they are
// currently unused and exist to support re-enabling that optimization.
static bool test_range_start(const BufferResourceTracker::ResourceInfo& a,
		const BufferResourceTracker::ResourceInfo& b) {
	return a.offset + a.size < b.offset;
}
static bool test_range_end(const BufferResourceTracker::ResourceInfo& a,
		const BufferResourceTracker::ResourceInfo& b) {
	return a.offset > b.offset + b.size;
}
|
whupdup/frame
|
real/graphics/buffer_resource_tracker.cpp
|
C++
|
gpl-3.0
| 9,895
|
#pragma once
#include <volk.h>
#include <vector>
namespace ZN::GFX {
class BarrierInfoCollection;
// Tracks which byte ranges of a buffer were last used with which pipeline
// stage / access mask / queue family, and generates the VkBufferMemoryBarriers
// (including queue family ownership transfers) required when a new usage is
// recorded over those ranges.
class BufferResourceTracker final {
	public:
		// State of one tracked byte range [offset, offset+size).
		struct ResourceInfo {
			VkPipelineStageFlags stageFlags;
			VkAccessFlags accessMask;
			VkDeviceSize offset;
			VkDeviceSize size;
			uint32_t queueFamilyIndex;
			// Equal stage/access/queue family (byte range not compared).
			bool states_equal(const ResourceInfo& other) const;
			// Byte ranges overlap.
			bool intersects(const ResourceInfo& other) const;
			// This range entirely contains `other`.
			bool fully_covers(const ResourceInfo& other) const;
		};
		// ALWAYS: emit barriers even when states already match.
		// NEVER: track state only; emit no barriers.
		enum class BarrierMode {
			ALWAYS,
			NEVER
		};
		// Records `rangeIn` as the new state, appending any needed barriers to
		// `barrierInfo`, then coalesces adjacent identical-state ranges.
		void update_range(VkBuffer buffer, BarrierInfoCollection& barrierInfo,
				const ResourceInfo& rangeIn, BarrierMode barrierMode, bool ignorePreviousState);
		size_t get_range_count() const;
	private:
		std::vector<ResourceInfo> m_ranges;
		void insert_range_internal(VkBuffer buffer, const ResourceInfo& rangeIn,
				BarrierInfoCollection& barrierInfo, BarrierMode barrierMode,
				bool ignorePreviousState, bool generateLastBarrier = true);
		// Emits ranges that are pieces of A if B was subtracted from it, meaning the resulting
		// ranges include none of B's coverage
		void generate_range_difference(const ResourceInfo& a, const ResourceInfo& b,
				VkBuffer buffer, BarrierInfoCollection& barrierInfo, BarrierMode barrierMode,
				bool ignorePreviousState, bool generateLastBarrier);
		void insert_range_like(const ResourceInfo& info, VkBuffer buffer,
				BarrierInfoCollection& barrierInfo, BarrierMode barrierMode,
				bool ignorePreviousState, bool generateLastBarrier, VkDeviceSize minX,
				VkDeviceSize maxX);
		void union_ranges();
		static ResourceInfo create_range_like(const ResourceInfo& info, VkDeviceSize minX,
				VkDeviceSize maxX);
		static void add_barrier(VkBuffer buffer, BarrierInfoCollection& barrierInfo,
				const ResourceInfo& from, const ResourceInfo& to, VkDeviceSize offset,
				VkDeviceSize size, bool ignorePreviousState);
		static ResourceInfo make_range_intersection(const ResourceInfo& a, const ResourceInfo& b);
};
}
|
whupdup/frame
|
real/graphics/buffer_resource_tracker.hpp
|
C++
|
gpl-3.0
| 2,000
|
#pragma once
#include <core/memory.hpp>
#include <graphics/buffer.hpp>
#include <cassert>
namespace ZN::GFX {
// std::vector-style container whose element storage lives inside a
// persistently-mapped GFX::Buffer, so the contents are directly addressable
// by the CPU while remaining usable as GPU buffer data.
//
// Template parameters:
//   T               - element type
//   UsageFlags      - VkBufferUsageFlags for the backing buffer
//   InitialCapacity - element capacity of the first allocation (> 0)
//   MemoryUsage     - VMA memory usage for the backing allocation
//   RequiredFlags   - additional required VkMemoryPropertyFlags
template <typename T, VkBufferUsageFlags UsageFlags,
		size_t InitialCapacity = 256,
		VmaMemoryUsage MemoryUsage = VMA_MEMORY_USAGE_CPU_ONLY,
		VkMemoryPropertyFlags RequiredFlags = 0>
class BufferVector {
	public:
		static_assert(InitialCapacity > 0, "Initial capacity must be > 0");
		using value_type = T;
		using size_type = size_t;
		using reference = value_type&;
		using const_reference = const value_type&;
		using pointer = T*;
		using iterator = T*;
		using const_iterator = const T*;
		// Allocates and maps the initial buffer; the container starts empty.
		explicit BufferVector()
				: m_buffer(Buffer::create(InitialCapacity * sizeof(T), UsageFlags,
						MemoryUsage, RequiredFlags))
				, m_bufferBegin(reinterpret_cast<T*>(m_buffer->map()))
				, m_bufferEnd(m_bufferBegin) {}
		BufferVector(BufferVector&& other) noexcept
				: m_buffer(std::move(other.m_buffer))
				, m_bufferBegin(other.m_bufferBegin)
				, m_bufferEnd(other.m_bufferEnd) {
			other.m_buffer = nullptr;
			other.m_bufferBegin = nullptr;
			other.m_bufferEnd = nullptr;
		}
		BufferVector& operator=(BufferVector&& other) noexcept {
			if (this != &other) {
				// FIX: destroy our current elements before releasing the old
				// buffer; previously their destructors were never run, and
				// self-move-assignment corrupted the container.
				Memory::destroy(m_bufferBegin, m_bufferEnd);
				m_buffer = std::move(other.m_buffer);
				m_bufferBegin = other.m_bufferBegin;
				m_bufferEnd = other.m_bufferEnd;
				other.m_buffer = nullptr;
				other.m_bufferBegin = nullptr;
				other.m_bufferEnd = nullptr;
			}
			return *this;
		}
		BufferVector(const BufferVector&) = delete;
		void operator=(const BufferVector&) = delete;
		~BufferVector() {
			Memory::destroy(m_bufferBegin, m_bufferEnd);
		}
		iterator begin() {
			return m_bufferBegin;
		}
		iterator end() {
			return m_bufferEnd;
		}
		const_iterator cbegin() const {
			return m_bufferBegin;
		}
		const_iterator cend() const {
			return m_bufferEnd;
		}
		void push_back(const T& value) {
			ensure_capacity();
			Memory::construct_at(m_bufferEnd, value);
			++m_bufferEnd;
		}
		void push_back(T&& value) {
			ensure_capacity();
			Memory::construct_at(m_bufferEnd, std::move(value));
			++m_bufferEnd;
		}
		template <typename... Args>
		void emplace_back(Args&&... args) {
			ensure_capacity();
			Memory::construct_at(m_bufferEnd, std::forward<Args>(args)...);
			++m_bufferEnd;
		}
		void pop_back() {
			assert(!empty() && "Cannot pop_back() an empty vector");
			Memory::destroy_at(m_bufferEnd - 1);
			--m_bufferEnd;
		}
		// Inserts before `pos`; returns an iterator to the new element.
		iterator insert(const_iterator pos, const T& value) {
			size_t index = pos - m_bufferBegin;
			prepare_insert(index);
			Memory::construct_at(m_bufferBegin + index, value);
			return m_bufferBegin + index;
		}
		iterator insert(const_iterator pos, T&& value) {
			size_t index = pos - m_bufferBegin;
			prepare_insert(index);
			Memory::construct_at(m_bufferBegin + index, std::move(value));
			return m_bufferBegin + index;
		}
		template <typename... Args>
		iterator emplace(const_iterator pos, Args&&... args) {
			size_t index = pos - m_bufferBegin;
			prepare_insert(index);
			Memory::construct_at(m_bufferBegin + index, std::forward<Args>(args)...);
			return m_bufferBegin + index;
		}
		void reserve(size_type cap) {
			reserve_bytes(cap * sizeof(T));
		}
		// Grows with default-constructed elements or shrinks by destroying the
		// tail, matching std::vector::resize semantics.
		void resize(size_type count) {
			auto sz = size();
			if (count > sz) {
				for (size_type i = sz; i < count; ++i) {
					emplace_back();
				}
			}
			else if (count < sz) {
				for (size_type i = count; i < sz; ++i) {
					pop_back();
				}
			}
		}
		// Destroys all elements; capacity (and the mapping) is retained.
		void clear() {
			Memory::destroy(m_bufferBegin, m_bufferEnd);
			m_bufferEnd = m_bufferBegin;
		}
		reference operator[](size_type index) {
			return m_bufferBegin[index];
		}
		const_reference operator[](size_type index) const {
			return m_bufferBegin[index];
		}
		reference front() {
			return m_bufferBegin[0];
		}
		const_reference front() const {
			return m_bufferBegin[0];
		}
		reference back() {
			return m_bufferEnd[-1];
		}
		const_reference back() const {
			return m_bufferEnd[-1];
		}
		bool empty() const {
			return m_bufferBegin == m_bufferEnd;
		}
		size_type size() const {
			return m_bufferEnd - m_bufferBegin;
		}
		size_type size_bytes() const {
			return sizeof(T) * size();
		}
		size_type capacity() const {
			return m_buffer->get_size() / sizeof(T);
		}
		size_type byte_capacity() const {
			return m_buffer->get_size();
		}
		T* data() {
			return m_bufferBegin;
		}
		const T* data() const {
			return m_bufferBegin;
		}
		// The backing GPU buffer (e.g. for binding as vertex/index data).
		Buffer& buffer() const {
			return *m_buffer;
		}
	private:
		Memory::IntrusivePtr<Buffer> m_buffer;
		T* m_bufferBegin; // mapped CPU address of element 0
		T* m_bufferEnd;   // one past the last constructed element
		// Opens a *destroyed* slot at `index`, shifting [index, size) one
		// position right. The caller must placement-construct into the slot.
		void prepare_insert(size_t index) {
			ensure_capacity();
			auto oldSize = size();
			++m_bufferEnd;
			if (index < oldSize) {
				// FIX: the new tail slot is raw memory — move-*construct* the
				// old last element into it; the previous code move-assigned
				// into uninitialized storage, which is UB for non-trivial T.
				Memory::construct_at(m_bufferBegin + oldSize,
						std::move(m_bufferBegin[oldSize - 1]));
				for (size_t i = oldSize - 1; i > index; --i) {
					m_bufferBegin[i] = std::move(m_bufferBegin[i - 1]);
				}
				// FIX: destroy the moved-from element at `index` so the
				// caller's construct_at does not placement-new over a live
				// object (which would skip its destructor).
				Memory::destroy_at(m_bufferBegin + index);
			}
		}
		// Doubles capacity when the buffer is exactly full.
		void ensure_capacity() {
			if (sizeof(T) * size() == byte_capacity()) {
				reserve_bytes(2 * byte_capacity());
			}
		}
		// Allocates a new buffer of `numBytes`, moves the elements over, and
		// retargets begin/end at the new mapping.
		void reserve_bytes(size_t numBytes) {
			// FIX: like std::vector::reserve, never shrink — a target smaller
			// than the current contents would overflow the new allocation.
			if (numBytes <= byte_capacity()) {
				return;
			}
			auto newBuffer = Buffer::create(numBytes, UsageFlags, MemoryUsage, RequiredFlags);
			T* newBegin = reinterpret_cast<T*>(newBuffer->map());
			T* newEnd = newBegin + size();
			Memory::uninitialized_move(m_bufferBegin, m_bufferEnd, newBegin);
			Memory::destroy(m_bufferBegin, m_bufferEnd);
			m_buffer = std::move(newBuffer);
			m_bufferBegin = newBegin;
			m_bufferEnd = newEnd;
		}
};
}
|
whupdup/frame
|
real/graphics/buffer_vector.hpp
|
C++
|
gpl-3.0
| 5,309
|
#include "command_buffer.hpp"
#include <core/scoped_lock.hpp>
#include <graphics/compute_pipeline.hpp>
#include <graphics/graphics_pipeline.hpp>
#include <graphics/render_context.hpp>
#include <graphics/vk_common.hpp>
#include <graphics/vk_initializers.hpp>
#include <TracyVulkan.hpp>
using namespace ZN;
using namespace ZN::GFX;
// Creates a command buffer wrapper with its own transient command pool on the
// given queue family.
IntrusivePtr<CommandBuffer> CommandBuffer::create(VkDevice device, uint32_t queueFamilyIndex) {
	return IntrusivePtr<CommandBuffer>(new CommandBuffer(CommandPool::create(device,
			queueFamilyIndex, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT)));
}
CommandBuffer::CommandBuffer(IntrusivePtr<CommandPool> pool)
		: m_pool(std::move(pool))
		, m_mutex(Scheduler::Mutex::create())
		, m_currentPass{}
		, m_currentSubpass(0) {}
void CommandBuffer::recording_begin() {
ScopedLock lock(*m_mutex);
m_pool->reset();
m_keepAliveBuffers.clear();
m_keepAliveImages.clear();
m_keepAliveRenderPasses.clear();
auto tmp = std::move(m_keepAliveFramebuffers[FRAMEBUFFER_KEEP_ALIVE_COUNT - 1]);
tmp.clear();
for (size_t i = FRAMEBUFFER_KEEP_ALIVE_COUNT - 1; i != 0; --i) {
m_keepAliveFramebuffers[i] = std::move(m_keepAliveFramebuffers[i - 1]);
}
m_keepAliveFramebuffers[0] = std::move(tmp);
m_cmd = m_pool->alloc_buffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY);
auto beginInfo = vkinit::command_buffer_begin_info(
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
vkBeginCommandBuffer(m_cmd, &beginInfo);
}
// Finishes recording. Tracy GPU timing data is only collected on the graphics
// queue family, since that is where the Tracy context lives.
void CommandBuffer::recording_end() {
	ScopedLock lock(*m_mutex);
	if (m_pool->get_queue_family() == g_renderContext->get_graphics_queue_family()) {
		TracyVkCollect(g_renderContext->get_tracy_context(), m_cmd);
	}
	VK_CHECK(vkEndCommandBuffer(m_cmd));
}
// Begins an inline render pass covering the whole framebuffer, and sets the
// viewport/scissor to the full framebuffer extent. The pass and framebuffer
// are reference-counted into the keep-alive lists so they outlive submission.
void CommandBuffer::begin_render_pass(RenderPass& renderPass, Framebuffer& framebuffer,
		uint32_t clearValueCount, const VkClearValue* pClearValues) {
	VkRenderPassBeginInfo beginInfo{
		.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
		.renderPass = renderPass,
		.framebuffer = framebuffer,
		.renderArea = {
			.offset = {},
			.extent = {framebuffer.get_width(), framebuffer.get_height()}
		},
		.clearValueCount = clearValueCount,
		.pClearValues = pClearValues
	};
	VkViewport viewport{
		.width = static_cast<float>(framebuffer.get_width()),
		.height = static_cast<float>(framebuffer.get_height()),
		.maxDepth = 1.f
	};
	VkRect2D scissor{
		.offset = {},
		.extent = {
			framebuffer.get_width(),
			framebuffer.get_height()
		}
	};
	ScopedLock lock(*m_mutex);
	vkCmdBeginRenderPass(m_cmd, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
	vkCmdSetViewport(m_cmd, 0, 1, &viewport);
	vkCmdSetScissor(m_cmd, 0, 1, &scissor);
	m_keepAliveRenderPasses.emplace_back(renderPass.reference_from_this());
	m_keepAliveFramebuffers[0].emplace_back(framebuffer.reference_from_this());
	// Track the active pass/subpass for later graphics pipeline binds.
	m_currentPass = renderPass.reference_from_this();
	m_currentSubpass = 0;
}
// Advances to the next inline subpass of the active render pass.
void CommandBuffer::next_subpass() {
	ScopedLock lock(*m_mutex);
	vkCmdNextSubpass(m_cmd, VK_SUBPASS_CONTENTS_INLINE);
	++m_currentSubpass;
}
void CommandBuffer::end_render_pass() {
	ScopedLock lock(*m_mutex);
	vkCmdEndRenderPass(m_cmd);
	m_currentPass = {};
}
// Binds a graphics pipeline specialized for the active pass/subpass; caches
// the layout and bind point for push_constants/bind_descriptor_sets.
void CommandBuffer::bind_pipeline(GraphicsPipeline& pipeline) {
	ScopedLock lock(*m_mutex);
	m_currentBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
	m_currentLayout = pipeline.get_layout();
	vkCmdBindPipeline(m_cmd, VK_PIPELINE_BIND_POINT_GRAPHICS,
			pipeline.get_pipeline(*m_currentPass, m_currentSubpass));
}
// Compute variant; no render pass involvement.
void CommandBuffer::bind_pipeline(ComputePipeline& pipeline) {
	ScopedLock lock(*m_mutex);
	m_currentBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
	m_currentLayout = pipeline.get_layout();
	vkCmdBindPipeline(m_cmd, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline.get_pipeline());
}
// Thin mutex-guarded wrappers over the corresponding vkCmd* recording calls.
// push_constants/bind_descriptor_sets use the layout and bind point cached by
// the most recent bind_pipeline() call.
void CommandBuffer::push_constants(VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
		const void *pValues) {
	ScopedLock lock(*m_mutex);
	vkCmdPushConstants(m_cmd, m_currentLayout, stageFlags, offset, size, pValues);
}
void CommandBuffer::bind_vertex_buffers(uint32_t firstBinding, uint32_t bindingCount,
		const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
	ScopedLock lock(*m_mutex);
	vkCmdBindVertexBuffers(m_cmd, firstBinding, bindingCount, pBuffers, pOffsets);
}
void CommandBuffer::bind_index_buffer(VkBuffer buffer, VkDeviceSize offset,
		VkIndexType indexType) {
	ScopedLock lock(*m_mutex);
	vkCmdBindIndexBuffer(m_cmd, buffer, offset, indexType);
}
void CommandBuffer::bind_descriptor_sets(uint32_t firstSet, uint32_t descriptorSetCount,
		const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount,
		const uint32_t* pDynamicOffsets) {
	ScopedLock lock(*m_mutex);
	vkCmdBindDescriptorSets(m_cmd, m_currentBindPoint, m_currentLayout, firstSet,
			descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}
void CommandBuffer::draw(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
		uint32_t firstInstance) {
	ScopedLock lock(*m_mutex);
	vkCmdDraw(m_cmd, vertexCount, instanceCount, firstVertex, firstInstance);
}
void CommandBuffer::draw_indexed(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex,
		int32_t vertexOffset, uint32_t firstInstance) {
	ScopedLock lock(*m_mutex);
	vkCmdDrawIndexed(m_cmd, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
}
void CommandBuffer::draw_indexed_indirect(Buffer& buffer, VkDeviceSize offset, uint32_t drawCount,
		uint32_t stride) {
	ScopedLock lock(*m_mutex);
	// NOTE(review): unlike copy_buffer(), the indirect buffer is NOT added to
	// m_keepAliveBuffers — confirm callers guarantee its lifetime until the
	// submission completes.
	vkCmdDrawIndexedIndirect(m_cmd, buffer, offset, drawCount, stride);
}
void CommandBuffer::dispatch(uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) {
	ScopedLock lock(*m_mutex);
	vkCmdDispatch(m_cmd, groupCountX, groupCountY, groupCountZ);
}
// Records a buffer-to-buffer copy; both buffers are reference-counted into
// the keep-alive list so they survive until the next recording_begin().
void CommandBuffer::copy_buffer(Buffer& srcBuffer, Buffer& dstBuffer, uint32_t regionCount,
		const VkBufferCopy *pRegions) {
	ScopedLock lock(*m_mutex);
	vkCmdCopyBuffer(m_cmd, srcBuffer, dstBuffer, regionCount, pRegions);
	m_keepAliveBuffers.emplace_back(srcBuffer.reference_from_this());
	m_keepAliveBuffers.emplace_back(dstBuffer.reference_from_this());
}
// Records a buffer-to-image copy; keeps both resources alive as above.
void CommandBuffer::copy_buffer_to_image(Buffer& srcBuffer, Image& dstImage,
		VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions) {
	ScopedLock lock(*m_mutex);
	vkCmdCopyBufferToImage(m_cmd, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
	m_keepAliveBuffers.emplace_back(srcBuffer.reference_from_this());
	m_keepAliveImages.emplace_back(dstImage.reference_from_this());
}
// Direct pass-through to vkCmdPipelineBarrier.
void CommandBuffer::pipeline_barrier(VkPipelineStageFlags srcStageMask,
		VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
		uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
		uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
		uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) {
	ScopedLock lock(*m_mutex);
	vkCmdPipelineBarrier(m_cmd, srcStageMask, dstStageMask, dependencyFlags,
			memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
			imageMemoryBarrierCount, pImageMemoryBarriers);
}
// Implicit conversion to the underlying VkCommandBuffer handle.
CommandBuffer::operator VkCommandBuffer() const {
	return m_cmd;
}
// Pointer to the handle, suitable for VkSubmitInfo::pCommandBuffers.
const VkCommandBuffer* CommandBuffer::get_buffers() const {
	return &m_cmd;
}
|
whupdup/frame
|
real/graphics/command_buffer.cpp
|
C++
|
gpl-3.0
| 7,248
|
#pragma once
#include <graphics/buffer.hpp>
#include <graphics/command_pool.hpp>
#include <graphics/framebuffer.hpp>
#include <graphics/render_pass.hpp>
#include <scheduler/task_scheduler.hpp>
#include <vector>
namespace ZN::GFX {
class ComputePipeline;
class GraphicsPipeline;
// Mutex-guarded wrapper around a primary VkCommandBuffer with its own
// transient pool. Resources referenced during recording are kept alive until
// the wrapper is re-recorded (framebuffers for an extra generation).
class CommandBuffer final : public Memory::ThreadSafeIntrusivePtrEnabled<CommandBuffer> {
	public:
		// Number of recordings a framebuffer reference survives.
		static constexpr const size_t FRAMEBUFFER_KEEP_ALIVE_COUNT = 2;
		static IntrusivePtr<CommandBuffer> create(VkDevice device, uint32_t queueFamilyIndex);
		~CommandBuffer() = default;
		NULL_COPY_AND_ASSIGN(CommandBuffer);
		// Resets the pool, drops keep-alive references, begins recording.
		void recording_begin();
		void recording_end();
		void begin_render_pass(RenderPass&, Framebuffer&, uint32_t clearValueCount,
				const VkClearValue* pClearValues);
		void next_subpass();
		void end_render_pass();
		void bind_pipeline(GraphicsPipeline&);
		void bind_pipeline(ComputePipeline&);
		void push_constants(VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
				const void* pValues);
		void bind_vertex_buffers(uint32_t firstBinding, uint32_t bindingCount,
				const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
		void bind_index_buffer(VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
		void bind_descriptor_sets(uint32_t firstSet, uint32_t descriptorSetCount,
				const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount,
				const uint32_t* pDynamicOffsets);
		void draw(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
				uint32_t firstInstance);
		void draw_indexed(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex,
				int32_t vertexOffset, uint32_t firstInstance);
		void draw_indexed_indirect(Buffer& buffer, VkDeviceSize offset, uint32_t drawCount,
				uint32_t stride);
		void dispatch(uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
		void copy_buffer(Buffer& srcBuffer, Buffer& dstBuffer, uint32_t regionCount,
				const VkBufferCopy* pRegions);
		void copy_buffer_to_image(Buffer& srcBuffer, Image& dstImage, VkImageLayout dstImageLayout,
				uint32_t regionCount, const VkBufferImageCopy* pRegions);
		void pipeline_barrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
				VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
				const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
				const VkBufferMemoryBarrier* pBufferMemoryBarriers,
				uint32_t imageMemoryBarrierCount,
				const VkImageMemoryBarrier* pImageMemoryBarriers);
		operator VkCommandBuffer() const;
		const VkCommandBuffer* get_buffers() const;
	private:
		IntrusivePtr<CommandPool> m_pool;
		VkCommandBuffer m_cmd;
		// Resources referenced by the current recording, released on the next
		// recording_begin().
		std::vector<IntrusivePtr<Buffer>> m_keepAliveBuffers;
		std::vector<IntrusivePtr<Image>> m_keepAliveImages;
		std::vector<IntrusivePtr<RenderPass>> m_keepAliveRenderPasses;
		// Generational slots; slot 0 is the current recording.
		std::vector<IntrusivePtr<Framebuffer>> m_keepAliveFramebuffers[
				FRAMEBUFFER_KEEP_ALIVE_COUNT];
		IntrusivePtr<Scheduler::Mutex> m_mutex;
		// State cached for pipeline/descriptor binding.
		IntrusivePtr<RenderPass> m_currentPass;
		VkPipelineLayout m_currentLayout;
		uint32_t m_currentSubpass;
		VkPipelineBindPoint m_currentBindPoint;
		explicit CommandBuffer(IntrusivePtr<CommandPool>);
};
}
|
whupdup/frame
|
real/graphics/command_buffer.hpp
|
C++
|
gpl-3.0
| 3,217
|
#include "command_pool.hpp"
#include <graphics/render_context.hpp>
#include <graphics/vk_common.hpp>
using namespace ZN;
using namespace ZN::GFX;
// Creates a command pool on the given queue family; returns a null pointer
// when vkCreateCommandPool fails.
Memory::IntrusivePtr<CommandPool> CommandPool::create(VkDevice device, uint32_t queueFamilyIndex,
		VkCommandPoolCreateFlags createFlags) {
	const VkCommandPoolCreateInfo poolInfo{
		.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
		.pNext = nullptr,
		.flags = createFlags,
		.queueFamilyIndex = queueFamilyIndex
	};
	VkCommandPool handle = VK_NULL_HANDLE;
	if (vkCreateCommandPool(device, &poolInfo, nullptr, &handle) != VK_SUCCESS) {
		return {};
	}
	return Memory::IntrusivePtr<CommandPool>(new CommandPool(handle, queueFamilyIndex));
}
CommandPool::CommandPool(VkCommandPool pool, uint32_t queueFamily)
		: m_pool(pool)
		, m_queueFamily(queueFamily) {}
// Destruction is deferred through the render context's deletion queue so the
// pool is not destroyed while the GPU may still be using its buffers.
CommandPool::~CommandPool() {
	g_renderContext->queue_delete([pool=m_pool] {
		vkDestroyCommandPool(g_renderContext->get_device(), pool, nullptr);
	});
}
// Allocates a single command buffer of the requested level from this pool;
// returns VK_NULL_HANDLE on allocation failure.
VkCommandBuffer CommandPool::alloc_buffer(VkCommandBufferLevel level) {
	const VkCommandBufferAllocateInfo allocInfo{
		.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
		.pNext = nullptr,
		.commandPool = m_pool,
		.level = level,
		.commandBufferCount = 1
	};
	VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
	if (vkAllocateCommandBuffers(g_renderContext->get_device(), &allocInfo,
			&commandBuffer) != VK_SUCCESS) {
		return VK_NULL_HANDLE;
	}
	return commandBuffer;
}
// Returns a single command buffer to the pool.
void CommandPool::free_buffer(VkCommandBuffer cmd) {
	vkFreeCommandBuffers(g_renderContext->get_device(), m_pool, 1, &cmd);
}
// Resets the whole pool, recycling the memory of all its command buffers.
void CommandPool::reset(VkCommandPoolResetFlags flags) {
	VK_CHECK(vkResetCommandPool(g_renderContext->get_device(), m_pool, flags));
}
CommandPool::operator VkCommandPool() const {
	return m_pool;
}
// Queue family index the pool was created for.
uint32_t CommandPool::get_queue_family() const {
	return m_queueFamily;
}
|
whupdup/frame
|
real/graphics/command_pool.cpp
|
C++
|
gpl-3.0
| 1,801
|
#pragma once
#include <core/intrusive_ptr.hpp>
#include <volk.h>
namespace ZN::GFX {
// Reference-counted wrapper around a VkCommandPool; destruction is deferred
// through the render context's deletion queue.
class CommandPool final : public Memory::IntrusivePtrEnabled<CommandPool> {
	public:
		// Returns a null pointer on creation failure.
		static Memory::IntrusivePtr<CommandPool> create(VkDevice device, uint32_t queueFamilyIndex,
				VkCommandPoolCreateFlags createFlags = 0);
		~CommandPool();
		// Allocates one command buffer; VK_NULL_HANDLE on failure.
		VkCommandBuffer alloc_buffer(VkCommandBufferLevel);
		void free_buffer(VkCommandBuffer);
		void reset(VkCommandPoolResetFlags flags = 0);
		operator VkCommandPool() const;
		uint32_t get_queue_family() const;
	private:
		VkCommandPool m_pool;
		uint32_t m_queueFamily;
		explicit CommandPool(VkCommandPool, uint32_t queueFamily);
};
}
|
whupdup/frame
|
real/graphics/command_pool.hpp
|
C++
|
gpl-3.0
| 678
|
#include "compute_pipeline.hpp"
#include <graphics/render_context.hpp>
using namespace ZN;
using namespace ZN::GFX;
// ComputePipeline
// Wraps an already-created pipeline handle and its layout.
Memory::IntrusivePtr<ComputePipeline> ComputePipeline::create(VkPipeline pipeline,
		VkPipelineLayout layout) {
	// Spell the template argument explicitly for consistency with the other
	// factories in this codebase (Buffer::create, CommandPool::create)
	// instead of relying on CTAD.
	return Memory::IntrusivePtr<ComputePipeline>(new ComputePipeline(pipeline, layout));
}
ComputePipeline::ComputePipeline(VkPipeline pipeline, VkPipelineLayout layout)
		: m_pipeline(pipeline)
		, m_layout(layout) {}
// Pipeline destruction is deferred through the render context's deletion
// queue so in-flight work can finish first. The layout is not owned here.
ComputePipeline::~ComputePipeline() {
	if (m_pipeline != VK_NULL_HANDLE) {
		g_renderContext->queue_delete([pipeline=this->m_pipeline] {
			vkDestroyPipeline(g_renderContext->get_device(), pipeline, nullptr);
		});
	}
}
VkPipeline ComputePipeline::get_pipeline() const {
	return m_pipeline;
}
VkPipelineLayout ComputePipeline::get_layout() const {
	return m_layout;
}
// ComputePipelineBuilder
// Sets the shader program to build from; returns *this for chaining.
ComputePipelineBuilder& ComputePipelineBuilder::add_program(ShaderProgram& program) {
	m_program = program.reference_from_this();
	return *this;
}
// Creates the compute pipeline from the bound program; returns a null pointer
// when vkCreateComputePipelines fails.
// NOTE(review): m_program is dereferenced unconditionally — assumes
// add_program() was called first; confirm all call sites do so.
Memory::IntrusivePtr<ComputePipeline> ComputePipelineBuilder::build() {
	VkPipeline pipeline;
	VkComputePipelineCreateInfo createInfo{
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = {
			.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
			.stage = VK_SHADER_STAGE_COMPUTE_BIT,
			// presumably module [0] is the sole module of a compute program —
			// verify against ShaderProgram's conventions
			.module = m_program->get_shader_modules()[0],
			.pName = "main"
		},
		.layout = m_program->get_pipeline_layout()
	};
	if (vkCreateComputePipelines(g_renderContext->get_device(), VK_NULL_HANDLE, 1, &createInfo,
			nullptr, &pipeline) == VK_SUCCESS) {
		return ComputePipeline::create(pipeline, m_program->get_pipeline_layout());
	}
	return {};
}
|
whupdup/frame
|
real/graphics/compute_pipeline.cpp
|
C++
|
gpl-3.0
| 1,666
|
#pragma once
#include <graphics/shader_program.hpp>
namespace ZN::GFX {
// Reference-counted wrapper around a compute VkPipeline and the layout it
// was created with (the layout is owned by the ShaderProgram).
class ComputePipeline final : public Memory::ThreadSafeIntrusivePtrEnabled<ComputePipeline> {
	public:
		static Memory::IntrusivePtr<ComputePipeline> create(VkPipeline, VkPipelineLayout);
		~ComputePipeline();
		NULL_COPY_AND_ASSIGN(ComputePipeline);
		VkPipeline get_pipeline() const;
		VkPipelineLayout get_layout() const;
	private:
		VkPipeline m_pipeline;
		VkPipelineLayout m_layout;
		explicit ComputePipeline(VkPipeline, VkPipelineLayout);
};
// Fluent builder: bind a compute ShaderProgram, then build() the pipeline.
class ComputePipelineBuilder final {
	public:
		explicit ComputePipelineBuilder() = default;
		NULL_COPY_AND_ASSIGN(ComputePipelineBuilder);
		ComputePipelineBuilder& add_program(ShaderProgram&);
		// Null pointer on pipeline creation failure.
		[[nodiscard]] Memory::IntrusivePtr<ComputePipeline> build();
	private:
		Memory::IntrusivePtr<ShaderProgram> m_program;
};
}
|
whupdup/frame
|
real/graphics/compute_pipeline.hpp
|
C++
|
gpl-3.0
| 858
|
#include "dds_texture.hpp"
#include <cstring>
#define MAKEFOURCC(a, b, c, d) \
((uint32_t)(uint8_t)(a) | ((uint32_t)(uint8_t)(b) << 8) | \
((uint32_t)(uint8_t)(c) << 16) | ((uint32_t)(uint8_t)(d) << 24 ))
#define MAKEFOURCCDXT(a) MAKEFOURCC('D', 'X', 'T', a)
#define FOURCC_DXT1 MAKEFOURCCDXT('1')
#define FOURCC_DXT2 MAKEFOURCCDXT('2')
#define FOURCC_DXT3 MAKEFOURCCDXT('3')
#define FOURCC_DXT4 MAKEFOURCCDXT('4')
#define FOURCC_DXT5 MAKEFOURCCDXT('5')
#define FOURCC_R16F 0x0000006F
#define FOURCC_G16R16F 0x00000070
#define FOURCC_A16B16G16R16F 0x00000071
#define FOURCC_R32F 0x00000072
#define FOURCC_G32R32F 0x00000073
#define FOURCC_A32B32G32R32F 0x00000074
// caps1
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
// caps2
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
// pixel format flags
#define DDPF_ALPHAPIXELS 0x1
#define DDPF_ALPHA 0x2
#define DDPF_FOURCC 0x4
#define DDPF_RGB 0x40
#define DDPF_YUV 0x200
#define DDPF_LUMINANCE 0x20000
namespace {
// In-memory mirror of the 124-byte DDS_HEADER that follows the 4-byte
// "DDS " magic in a .dds file.
struct Header {
	uint32_t size;
	uint32_t flags;
	uint32_t height;
	uint32_t width;
	uint32_t pitchOrLinearSize;
	uint32_t depth;
	uint32_t mipMapCount;
	uint32_t reserved1[11];
	// Mirrors DDS_PIXELFORMAT (8 dwords).
	struct PixelFormat {
		uint32_t size;
		uint32_t flags;
		uint32_t fourCC;
		uint32_t rgbBitCount;
		uint32_t rBitMask;
		uint32_t gBitMask;
		uint32_t bBitMask;
		// BUGFIX: dwABitMask was missing; without it every field from `caps`
		// onward read 4 bytes too early, so the mipmap/cubemap caps checks
		// inspected the wrong dwords.
		uint32_t aBitMask;
	} pixelFormat;
	uint32_t caps;
	uint32_t caps2;
	uint32_t caps3;
	uint32_t caps4;
	uint32_t reserved2;
};
// Lock the layout to the on-disk DDS_HEADER size (128-byte file prologue
// minus the 4-byte magic).
static_assert(sizeof(Header) == 124, "Header must match the on-disk DDS_HEADER layout");
}
using namespace ZN;
using namespace ZN::GFX::DDS;
static void calc_size(const Header&, DDSTextureInfo&);
static void get_format(const Header&, DDSTextureInfo&, bool);
static bool is_compressed(const Header&);
// Parses the DDS header at `memory` (size bytes) into `textureInfo`.
// Returns false when the buffer is too small or lacks the "DDS " magic.
// `srgb` requests the sRGB variant of the detected format.
bool ZN::GFX::DDS::get_texture_info(DDSTextureInfo& textureInfo, const void* memory, size_t size,
		bool srgb) {
	if (size < 128) { // 4-byte magic + 124-byte header
		return false;
	}
	const uint8_t* bytes = reinterpret_cast<const uint8_t*>(memory);
	if (strncmp(reinterpret_cast<const char*>(bytes), "DDS ", 4) != 0) {
		return false;
	}
	// Copy the header out of the byte stream: `memory` carries no alignment
	// guarantee, so reading through a reinterpret_cast'd Header* would be UB.
	Header header;
	memcpy(&header, bytes + 4, sizeof(Header));
	textureInfo.height = header.height;
	textureInfo.width = header.width;
	textureInfo.mipMapCount = header.mipMapCount;
	// BUGFIX: fill the flag fields *before* calling calc_size/get_format —
	// calc_size reads hasMipMaps/isCubeMap and get_format reads hasAlpha;
	// previously they were assigned afterwards and so read garbage.
	textureInfo.hasMipMaps = header.caps & DDSCAPS_MIPMAP;
	textureInfo.isCubeMap = header.caps2 & DDSCAPS2_CUBEMAP;
	textureInfo.isCompressed = is_compressed(header);
	textureInfo.hasAlpha = header.pixelFormat.flags & DDPF_ALPHAPIXELS;
	calc_size(header, textureInfo);
	get_format(header, textureInfo, srgb);
	// Payload starts right after magic + header; non-owning view into `memory`.
	textureInfo.dataStart = bytes + 128;
	return true;
}
// Maps the header's FourCC to a VkFormat; unknown codes yield
// VK_FORMAT_UNDEFINED. When `srgb` is set, block-compressed formats are
// promoted to their sRGB variant.
static void get_format(const Header& header, DDSTextureInfo& textureInfo, bool srgb) {
	bool blockCompressed = false;
	switch (header.pixelFormat.fourCC) {
	case FOURCC_DXT1:
		// DXT1 may or may not carry 1-bit alpha; the caller fills hasAlpha
		// from DDPF_ALPHAPIXELS before invoking us.
		textureInfo.format = textureInfo.hasAlpha ? VK_FORMAT_BC1_RGBA_UNORM_BLOCK
				: VK_FORMAT_BC1_RGB_UNORM_BLOCK;
		blockCompressed = true;
		break;
	case FOURCC_DXT3:
		textureInfo.format = VK_FORMAT_BC2_UNORM_BLOCK;
		blockCompressed = true;
		break;
	case FOURCC_DXT5:
		textureInfo.format = VK_FORMAT_BC3_UNORM_BLOCK;
		blockCompressed = true;
		break;
	case FOURCC_R16F:
		textureInfo.format = VK_FORMAT_R16_SFLOAT;
		break;
	case FOURCC_G16R16F:
		textureInfo.format = VK_FORMAT_R16G16_SFLOAT;
		break;
	case FOURCC_A16B16G16R16F:
		textureInfo.format = VK_FORMAT_R16G16B16A16_SFLOAT;
		break;
	case FOURCC_R32F:
		textureInfo.format = VK_FORMAT_R32_SFLOAT;
		break;
	case FOURCC_G32R32F:
		textureInfo.format = VK_FORMAT_R32G32_SFLOAT;
		break;
	case FOURCC_A32B32G32R32F:
		textureInfo.format = VK_FORMAT_R32G32B32A32_SFLOAT;
		break;
	default:
		textureInfo.format = VK_FORMAT_UNDEFINED;
	}
	// In the VkFormat enumeration each BC *_SRGB_BLOCK value directly follows
	// its *_UNORM_BLOCK counterpart, so +1 selects the sRGB variant.
	// BUGFIX: the original applied +1 to *any* defined format, turning float
	// formats (which have no sRGB form) into unrelated enum values.
	if (srgb && blockCompressed) {
		textureInfo.format = static_cast<VkFormat>(static_cast<uint32_t>(textureInfo.format) + 1);
	}
}
// A payload is treated as block-compressed only when the pixel format is
// FourCC-tagged AND the code is one of DXT1/DXT3/DXT5.
static bool is_compressed(const Header& header) {
	const auto& pf = header.pixelFormat;
	if ((pf.flags & DDPF_FOURCC) == 0) {
		return false;
	}
	return pf.fourCC == FOURCC_DXT1
			|| pf.fourCC == FOURCC_DXT3
			|| pf.fourCC == FOURCC_DXT5;
}
// Computes the total byte size of the texel payload into textureInfo.size.
// Relies on textureInfo.{width,height,hasMipMaps,isCubeMap} being filled in
// by the caller before this runs.
static void calc_size(const Header& header, DDSTextureInfo& textureInfo) {
	uint32_t size;
	if (is_compressed(header)) {
		// For block-compressed data the header stores the top-level linear size.
		size = header.pitchOrLinearSize;
	}
	else {
		// Uncompressed: texel count times bytes-per-texel for the known
		// FourCC float formats. NOTE(review): R16F/G16R16F/R32F/G32R32F fall
		// through with 1 byte/texel assumed — confirm whether that is intended.
		size = textureInfo.width * textureInfo.height;
		switch (header.pixelFormat.fourCC) {
		case FOURCC_A16B16G16R16F:
			size *= 8;
			break;
		case FOURCC_A32B32G32R32F:
			size *= 16;
			break;
		}
	}
	if (textureInfo.hasMipMaps) {
		// Mip chain estimate: 1.5x the base level. NOTE(review): the exact
		// geometric total converges to ~4/3x; 3/2 overshoots — confirm the
		// slack is deliberate.
		size = size * 3 / 2;
	}
	if (textureInfo.isCubeMap) {
		// Six faces, each with its own (estimated) mip chain.
		size *= 6;
	}
	textureInfo.size = size;
}
|
whupdup/frame
|
real/graphics/dds_texture.cpp
|
C++
|
gpl-3.0
| 4,866
|
#pragma once
#include <core/common.hpp>
#include <volk.h>
namespace ZN::GFX::DDS {
// Parsed metadata for a DDS blob. dataStart points into the caller's buffer
// (non-owning), directly after the 128-byte magic + header prologue.
struct DDSTextureInfo {
	uint32_t width;
	uint32_t height;
	uint32_t size;            // total payload bytes (mip total is an estimate)
	VkFormat format;
	uint32_t mipMapCount;
	const uint8_t* dataStart; // non-owning pointer into the source buffer
	bool isCubeMap;
	bool hasMipMaps;
	bool isCompressed;
	bool hasAlpha;
};
// Parses the DDS header in `memory` (size bytes) into textureInfo.
// Returns false when the buffer is too small or lacks the "DDS " magic.
// `srgb` requests the sRGB variant of supported formats.
bool get_texture_info(DDSTextureInfo& textureInfo, const void* memory, size_t size,
		bool srgb);
}
|
whupdup/frame
|
real/graphics/dds_texture.hpp
|
C++
|
gpl-3.0
| 407
|
#include "descriptors.hpp"
#include <core/fixed_array.hpp>
#include <core/pair.hpp>
#include <graphics/vk_common.hpp>
#include <algorithm>
#include <cassert>
#include <cstring>
using namespace ZN;
using namespace ZN::GFX;
// Per-descriptor-type weights used to size new descriptor pools: each pool
// reserves weight * DESCRIPTORS_PER_POOL descriptors of that type.
static constexpr FixedArray g_poolCreateRatios = {
	Pair{VK_DESCRIPTOR_TYPE_SAMPLER, 0.5f},
	Pair{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 4.f},
	Pair{VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 4.f},
	Pair{VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1.f},
	Pair{VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1.f},
	Pair{VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1.f},
	Pair{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2.f},
	Pair{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2.f},
	Pair{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1.f},
	Pair{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1.f},
	Pair{VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 0.5f}
};
// Max descriptor sets per pool; also the base count the ratios above scale.
static constexpr const uint32_t DESCRIPTORS_PER_POOL = 1000;
// DescriptorAllocator
// Pools are created lazily on the first allocate(); `flags` is forwarded to
// every pool this allocator creates.
DescriptorAllocator::DescriptorAllocator(VkDevice device, VkDescriptorPoolCreateFlags flags)
	: m_currentPool(VK_NULL_HANDLE)
	, m_device(device)
	, m_createFlags(flags) {}
DescriptorAllocator::~DescriptorAllocator() {
	// Destroy every pool we ever created: both the live ones and the reset
	// ones parked for reuse.
	auto destroyAll = [this](const std::vector<VkDescriptorPool>& pools) {
		for (VkDescriptorPool pool : pools) {
			vkDestroyDescriptorPool(m_device, pool, nullptr);
		}
	};
	destroyAll(m_pools);
	destroyAll(m_freePools);
}
// Allocates one descriptor set with the given layout. `dynamicArrayCount` > 0
// requests a variable-count descriptor array via
// VkDescriptorSetVariableDescriptorCountAllocateInfo. On pool exhaustion or
// fragmentation a fresh pool is created and the allocation retried once.
VkDescriptorSet DescriptorAllocator::allocate(VkDescriptorSetLayout layout,
		uint32_t dynamicArrayCount) {
	// Lazily create the first pool on first use.
	if (m_currentPool == VK_NULL_HANDLE) {
		m_currentPool = create_pool();
		m_pools.push_back(m_currentPool);
	}
	VkDescriptorSetAllocateInfo allocInfo{};
	allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
	allocInfo.pSetLayouts = &layout;
	allocInfo.descriptorPool = m_currentPool;
	allocInfo.descriptorSetCount = 1;
	// Must stay alive across both vkAllocateDescriptorSets calls below;
	// chained via allocInfo.pNext when a variable-count array is requested.
	VkDescriptorSetVariableDescriptorCountAllocateInfo setCounts{};
	if (dynamicArrayCount > 0) {
		setCounts.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO;
		setCounts.descriptorSetCount = 1;
		setCounts.pDescriptorCounts = &dynamicArrayCount;
		allocInfo.pNext = &setCounts;
	}
	VkDescriptorSet descriptorSet = VK_NULL_HANDLE;
	VkResult allocResult = vkAllocateDescriptorSets(m_device, &allocInfo, &descriptorSet);
	switch (allocResult) {
	case VK_SUCCESS:
		return descriptorSet;
	case VK_ERROR_FRAGMENTED_POOL:
	case VK_ERROR_OUT_OF_POOL_MEMORY:
		// The current pool is full or fragmented: switch to a fresh pool and
		// retry exactly once.
		m_currentPool = create_pool();
		m_pools.push_back(m_currentPool);
		allocInfo.descriptorPool = m_currentPool;
		VK_CHECK(vkAllocateDescriptorSets(m_device, &allocInfo, &descriptorSet));
		return descriptorSet;
	default:
		break;
	}
	// FIXME: if the program reaches here, this means there was a fatal error
	// (e.g. device loss / out of host memory); VK_NULL_HANDLE is returned.
	return descriptorSet;
}
void DescriptorAllocator::reset_pools() {
for (auto pool : m_pools) {
vkResetDescriptorPool(m_device, pool, 0);
m_freePools.push_back(pool);
}
m_pools.clear();
m_currentPool = VK_NULL_HANDLE;
}
// Device this allocator creates pools on; used by DescriptorBuilder to issue
// vkUpdateDescriptorSets.
VkDevice DescriptorAllocator::get_device() const {
	return m_device;
}
VkDescriptorPool DescriptorAllocator::create_pool() {
	// Prefer recycling a pool that reset_pools() parked earlier.
	if (!m_freePools.empty()) {
		VkDescriptorPool recycled = m_freePools.back();
		m_freePools.pop_back();
		return recycled;
	}
	// Otherwise build a fresh pool sized by the per-type weight table.
	std::vector<VkDescriptorPoolSize> poolSizes;
	poolSizes.reserve(g_poolCreateRatios.size());
	for (const auto& [descriptorType, weight] : g_poolCreateRatios) {
		poolSizes.push_back(VkDescriptorPoolSize{
			descriptorType,
			static_cast<uint32_t>(weight * DESCRIPTORS_PER_POOL)
		});
	}
	VkDescriptorPoolCreateInfo createInfo{};
	createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
	createInfo.flags = m_createFlags;
	createInfo.maxSets = DESCRIPTORS_PER_POOL;
	createInfo.poolSizeCount = static_cast<uint32_t>(poolSizes.size());
	createInfo.pPoolSizes = poolSizes.data();
	VkDescriptorPool pool;
	VK_CHECK(vkCreateDescriptorPool(m_device, &createInfo, nullptr, &pool));
	return pool;
}
// DescriptorLayoutCache
// Layouts are created lazily in get() and destroyed with the cache.
DescriptorLayoutCache::DescriptorLayoutCache(VkDevice device)
	: m_device(device) {}
DescriptorLayoutCache::~DescriptorLayoutCache() {
	// Every cached VkDescriptorSetLayout is owned by this cache.
	for (const auto& entry : m_setLayouts) {
		vkDestroyDescriptorSetLayout(m_device, entry.second, nullptr);
	}
	m_setLayouts.clear();
}
// Returns a cached layout matching `createInfo`, creating and caching one on
// a miss. Bindings are snapshotted and sorted so logically-equal layouts map
// to the same cache entry regardless of binding declaration order.
VkDescriptorSetLayout DescriptorLayoutCache::get(
		const VkDescriptorSetLayoutCreateInfo& createInfo) {
	LayoutInfo layoutInfo{};
	// FIXME: make this check the sType and perhaps the actual structure contents
	layoutInfo.hasUnboundedArray = createInfo.pNext != nullptr;
	assert(createInfo.bindingCount <= std::size(layoutInfo.bindings)
			&& "Must have <= bindings than the layout info storage cap");
	// (The original assigned numBindings twice; once is enough.)
	layoutInfo.numBindings = createInfo.bindingCount;
	memcpy(layoutInfo.bindings, createInfo.pBindings,
			createInfo.bindingCount * sizeof(VkDescriptorSetLayoutBinding));
	// Canonicalize the snapshot by binding index so hashing/equality are
	// order-independent.
	std::sort(layoutInfo.bindings, layoutInfo.bindings + createInfo.bindingCount,
			[](const auto& a, const auto& b) {
				return a.binding < b.binding;
			});
	if (auto it = m_setLayouts.find(layoutInfo); it != m_setLayouts.end()) {
		return it->second;
	}
	VkDescriptorSetLayout layout;
	VK_CHECK(vkCreateDescriptorSetLayout(m_device, &createInfo, nullptr, &layout));
	m_setLayouts.emplace(layoutInfo, layout);
	return layout;
}
// LayoutInfo
// Field-wise equality over the binding snapshots. pImmutableSamplers is
// deliberately not compared (it is not part of the cache key).
bool DescriptorLayoutCache::LayoutInfo::operator==(const LayoutInfo& other) const {
	if (numBindings != other.numBindings || hasUnboundedArray != other.hasUnboundedArray) {
		return false;
	}
	for (size_t i = 0; i < numBindings; ++i) {
		const auto& lhs = bindings[i];
		const auto& rhs = other.bindings[i];
		bool same = lhs.binding == rhs.binding
				&& lhs.descriptorType == rhs.descriptorType
				&& lhs.descriptorCount == rhs.descriptorCount
				&& lhs.stageFlags == rhs.stageFlags;
		if (!same) {
			return false;
		}
	}
	return true;
}
// Hash over the same fields operator== compares. Uses hash_combine-style
// mixing: the original packed fields into 8-bit lanes (descriptorType values
// from extensions exceed 255 and descriptorCount >= 256 bled into the next
// lane) and XOR-combined per-binding hashes, which cancels identical
// bindings. Only hash quality changes; equality still decides cache hits.
size_t DescriptorLayoutCache::LayoutInfo::hash() const {
	auto combine = [](size_t& seed, size_t value) {
		// boost::hash_combine-style mixing.
		seed ^= value + 0x9e3779b97f4a7c15ull + (seed << 6) + (seed >> 2);
	};
	size_t result = std::hash<size_t>{}(numBindings);
	for (size_t i = 0; i < numBindings; ++i) {
		const auto& bnd = bindings[i];
		combine(result, bnd.binding);
		combine(result, static_cast<size_t>(bnd.descriptorType));
		combine(result, bnd.descriptorCount);
		combine(result, bnd.stageFlags);
	}
	combine(result, static_cast<size_t>(hasUnboundedArray));
	return result;
}
// DescriptorBuilder
// Builds descriptor sets against a shared layout cache and pool allocator.
// m_dynamicArrayIndex stays ~0u ("none") until bind_dynamic_array() is called.
DescriptorBuilder::DescriptorBuilder(DescriptorLayoutCache& layoutCache,
		DescriptorAllocator& allocator)
	: m_dynamicArrayIndex(~0u)
	, m_layoutCache(layoutCache)
	, m_allocator(allocator) {}
// Single-buffer convenience wrapper around bind_buffers().
DescriptorBuilder& DescriptorBuilder::bind_buffer(uint32_t binding,
		const VkDescriptorBufferInfo& info, VkDescriptorType descriptorType,
		VkShaderStageFlags stageFlags) {
	return bind_buffers(binding, &info, 1, descriptorType, stageFlags);
}
// Registers an array of `count` buffer descriptors at `binding` and queues
// the corresponding write. `info` must point at `count` entries and stay
// alive until build() runs.
DescriptorBuilder& DescriptorBuilder::bind_buffers(uint32_t binding,
		const VkDescriptorBufferInfo* info, uint32_t count, VkDescriptorType descriptorType,
		VkShaderStageFlags stageFlags) {
	VkDescriptorSetLayoutBinding bindingInfo{};
	bindingInfo.descriptorCount = count;
	bindingInfo.descriptorType = descriptorType;
	bindingInfo.stageFlags = stageFlags;
	bindingInfo.binding = binding;
	m_bindings.emplace_back(bindingInfo);
	VkWriteDescriptorSet writeInfo{};
	writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
	// BUGFIX: write all `count` buffer descriptors, not just the first —
	// mirrors bind_images(), which already uses `count` here.
	writeInfo.descriptorCount = count;
	writeInfo.descriptorType = descriptorType;
	writeInfo.pBufferInfo = info;
	writeInfo.dstBinding = binding;
	m_writes.emplace_back(writeInfo);
	return *this;
}
// Single-image convenience wrapper around bind_images().
DescriptorBuilder& DescriptorBuilder::bind_image(uint32_t binding,
		const VkDescriptorImageInfo& info, VkDescriptorType descriptorType,
		VkShaderStageFlags stageFlags) {
	return bind_images(binding, &info, 1, descriptorType, stageFlags);
}
// Registers an array of `count` image descriptors at `binding` and queues the
// matching write. `info` must point at `count` entries and outlive build().
DescriptorBuilder& DescriptorBuilder::bind_images(uint32_t binding,
		const VkDescriptorImageInfo* info, uint32_t count, VkDescriptorType descriptorType,
		VkShaderStageFlags stageFlags) {
	m_bindings.push_back(VkDescriptorSetLayoutBinding{
		.binding = binding,
		.descriptorType = descriptorType,
		.descriptorCount = count,
		.stageFlags = stageFlags
	});
	// dstSet is patched in later by build() once the set exists.
	VkWriteDescriptorSet write{
		.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
		.dstBinding = binding,
		.descriptorCount = count,
		.descriptorType = descriptorType,
		.pImageInfo = info
	};
	m_writes.push_back(write);
	return *this;
}
// Registers a variable-count descriptor array of up to `maxSize` entries.
// Only the binding is recorded (no write is queued here); build() reads
// m_dynamicArrayIndex to attach the binding-flags chain.
DescriptorBuilder& DescriptorBuilder::bind_dynamic_array(uint32_t binding, uint32_t maxSize,
		VkDescriptorType descriptorType, VkShaderStageFlags stageFlags) {
	m_dynamicArrayIndex = static_cast<uint32_t>(m_bindings.size());
	m_bindings.push_back(VkDescriptorSetLayoutBinding{
		.binding = binding,
		.descriptorType = descriptorType,
		.descriptorCount = maxSize,
		.stageFlags = stageFlags
	});
	return *this;
}
// Resolves the accumulated bindings into a (cached) set layout, allocates a
// descriptor set from it, and flushes every queued write into the new set.
VkDescriptorSet DescriptorBuilder::build() {
	VkDescriptorSetLayoutCreateInfo layoutInfo{};
	layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
	layoutInfo.pBindings = m_bindings.data();
	layoutInfo.bindingCount = static_cast<uint32_t>(m_bindings.size());
	// Both must outlive the m_layoutCache.get() call below; layoutInfo.pNext
	// chains into them when a dynamic array was bound.
	std::vector<VkDescriptorBindingFlags> flags;
	VkDescriptorSetLayoutBindingFlagsCreateInfo bindingFlags{};
	uint32_t dynamicCount = 0u;
	if (m_dynamicArrayIndex != ~0u) {
		// A variable-count array binding was requested: flag it as variable
		// count / partially bound / update-after-bind and pass its capacity
		// through to the allocator.
		dynamicCount = m_bindings[m_dynamicArrayIndex].descriptorCount;
		layoutInfo.flags |= VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT;
		flags.resize(m_bindings.size());
		flags[m_dynamicArrayIndex] = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT
				| VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT
				| VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT;
		bindingFlags.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO;
		bindingFlags.bindingCount = layoutInfo.bindingCount;
		bindingFlags.pBindingFlags = flags.data();
		layoutInfo.pNext = &bindingFlags;
	}
	VkDescriptorSetLayout layout = m_layoutCache.get(layoutInfo);
	VkDescriptorSet descriptorSet = m_allocator.allocate(layout, dynamicCount);
	// Point every queued write at the freshly allocated set, then flush.
	for (auto& write : m_writes) {
		write.dstSet = descriptorSet;
	}
	vkUpdateDescriptorSets(m_allocator.get_device(), static_cast<uint32_t>(m_writes.size()),
			m_writes.data(), 0, nullptr);
	return descriptorSet;
}
|
whupdup/frame
|
real/graphics/descriptors.cpp
|
C++
|
gpl-3.0
| 10,387
|
#pragma once
#include <core/common.hpp>
#include <volk.h>
#include <unordered_map>
#include <vector>
namespace ZN::GFX {
// Grow-on-demand descriptor pool manager: hands out descriptor sets and
// recycles exhausted pools via reset_pools().
class DescriptorAllocator final {
public:
	explicit DescriptorAllocator(VkDevice, VkDescriptorPoolCreateFlags flags = 0);
	~DescriptorAllocator();
	NULL_COPY_AND_ASSIGN(DescriptorAllocator);
	// Allocates one set; dynamicArrayCount > 0 requests a variable-count array.
	VkDescriptorSet allocate(VkDescriptorSetLayout, uint32_t dynamicArrayCount = 0);
	// Resets all live pools and parks them for reuse.
	void reset_pools();
	VkDevice get_device() const;
private:
	std::vector<VkDescriptorPool> m_pools;     // pools currently in use
	std::vector<VkDescriptorPool> m_freePools; // reset pools awaiting reuse
	VkDescriptorPool m_currentPool;            // pool new sets come from
	VkDevice m_device;
	VkDescriptorPoolCreateFlags m_createFlags;
	VkDescriptorPool create_pool();
};
// Deduplicates VkDescriptorSetLayout objects keyed by their binding contents.
class DescriptorLayoutCache final {
public:
	explicit DescriptorLayoutCache(VkDevice);
	~DescriptorLayoutCache();
	NULL_COPY_AND_ASSIGN(DescriptorLayoutCache);
	// Hashable snapshot of a layout's bindings (capacity: 8 bindings).
	struct LayoutInfo {
		VkDescriptorSetLayoutBinding bindings[8];
		size_t numBindings;
		bool hasUnboundedArray;
		bool operator==(const LayoutInfo&) const;
		size_t hash() const;
	};
	// Returns a cached layout for the given create info, creating it on miss.
	VkDescriptorSetLayout get(const VkDescriptorSetLayoutCreateInfo&);
private:
	struct LayoutInfoHash {
		size_t operator()(const LayoutInfo& info) const {
			return info.hash();
		}
	};
	std::unordered_map<LayoutInfo, VkDescriptorSetLayout, LayoutInfoHash> m_setLayouts;
	VkDevice m_device;
};
// Fluent builder: accumulate buffer/image bindings, then build() resolves the
// layout through the cache and allocates + writes the descriptor set.
class DescriptorBuilder final {
public:
	explicit DescriptorBuilder(DescriptorLayoutCache&, DescriptorAllocator&);
	NULL_COPY_AND_ASSIGN(DescriptorBuilder);
	DescriptorBuilder& bind_buffer(uint32_t binding, const VkDescriptorBufferInfo&,
			VkDescriptorType, VkShaderStageFlags);
	DescriptorBuilder& bind_buffers(uint32_t binding, const VkDescriptorBufferInfo*,
			uint32_t count, VkDescriptorType, VkShaderStageFlags);
	DescriptorBuilder& bind_image(uint32_t binding, const VkDescriptorImageInfo&,
			VkDescriptorType, VkShaderStageFlags);
	DescriptorBuilder& bind_images(uint32_t binding, const VkDescriptorImageInfo*,
			uint32_t count, VkDescriptorType, VkShaderStageFlags);
	// Variable-count ("bindless") array binding; no write is queued for it.
	DescriptorBuilder& bind_dynamic_array(uint32_t binding, uint32_t maxSize,
			VkDescriptorType, VkShaderStageFlags);
	VkDescriptorSet build();
private:
	// FIXME: make this frame-local memory
	std::vector<VkWriteDescriptorSet> m_writes;
	std::vector<VkDescriptorSetLayoutBinding> m_bindings;
	uint32_t m_dynamicArrayIndex; // ~0u when no dynamic array is bound
	DescriptorLayoutCache& m_layoutCache;
	DescriptorAllocator& m_allocator;
};
}
|
whupdup/frame
|
real/graphics/descriptors.hpp
|
C++
|
gpl-3.0
| 2,469
|
#include "fence.hpp"
#include <graphics/render_context.hpp>
using namespace ZN;
using namespace ZN::GFX;
// Convenience overload: creates the fence on the global render context's device.
Memory::IntrusivePtr<Fence> Fence::create(VkFenceCreateFlags flags) {
	return create(g_renderContext->get_device(), flags);
}
// Creates a fence on an explicit device; returns an empty pointer on failure.
Memory::IntrusivePtr<Fence> Fence::create(VkDevice device, VkFenceCreateFlags flags) {
	const VkFenceCreateInfo createInfo{
		.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
		.pNext = nullptr,
		.flags = flags
	};
	VkFence fence = VK_NULL_HANDLE;
	if (vkCreateFence(device, &createInfo, nullptr, &fence) != VK_SUCCESS) {
		return {};
	}
	return Memory::IntrusivePtr<Fence>(new Fence(fence));
}
// Takes ownership of an already-created fence handle.
Fence::Fence(VkFence fence)
	: m_fence(fence) {}
// NOTE(review): destruction always uses the global render context's device;
// if a fence was created via the explicit-device overload with a *different*
// device this would be wrong — confirm only the context device is ever used.
Fence::~Fence() {
	vkDestroyFence(g_renderContext->get_device(), m_fence, nullptr);
}
// Returns the fence to the unsignaled state.
void Fence::reset() {
	vkResetFences(g_renderContext->get_device(), 1, &m_fence);
}
// Blocks the calling thread until the fence signals or `timeout` ns elapse.
void Fence::wait(uint64_t timeout) {
	vkWaitForFences(g_renderContext->get_device(), 1, &m_fence, VK_TRUE, timeout);
}
// VK_SUCCESS when signaled, VK_NOT_READY otherwise.
VkResult Fence::get_status() const {
	return vkGetFenceStatus(g_renderContext->get_device(), m_fence);
}
VkFence Fence::get_fence() const {
	return m_fence;
}
// Implicit handle conversion for passing straight to Vulkan calls.
Fence::operator VkFence() const {
	return m_fence;
}
|
whupdup/frame
|
real/graphics/fence.cpp
|
C++
|
gpl-3.0
| 1,166
|
#pragma once
#include <core/intrusive_ptr.hpp>
#include <volk.h>
namespace ZN::GFX {
// Ref-counted RAII wrapper over VkFence.
class Fence final : public Memory::IntrusivePtrEnabled<Fence> {
public:
	// Creates a fence on the global render context's device.
	static Memory::IntrusivePtr<Fence> create(VkFenceCreateFlags flags = 0);
	// Creates a fence on an explicit device.
	static Memory::IntrusivePtr<Fence> create(VkDevice, VkFenceCreateFlags flags = 0);
	~Fence();
	NULL_COPY_AND_ASSIGN(Fence);
	// Returns the fence to the unsignaled state.
	void reset();
	// Blocks until the fence signals or `timeout` nanoseconds elapse.
	void wait(uint64_t timeout = UINT64_MAX);
	VkResult get_status() const;
	VkFence get_fence() const;
	operator VkFence() const;
private:
	VkFence m_fence;
	explicit Fence(VkFence);
};
}
|
whupdup/frame
|
real/graphics/fence.hpp
|
C++
|
gpl-3.0
| 582
|
#include "frame_graph.hpp"
#include <graphics/barrier_info_collection.hpp>
#include <graphics/command_buffer.hpp>
#include <graphics/render_context.hpp>
#include <graphics/vk_common.hpp>
#include <Tracy.hpp>
#include <TracyVulkan.hpp>
#include <algorithm>
using namespace ZN;
using namespace ZN::GFX;
static bool has_incompatible_depth_attachments(const FrameGraphPass& pA, const FrameGraphPass& pB);
static void emit_pass_barriers(const BaseFrameGraphPass& pass, BarrierInfoCollection& barrierInfo,
FrameGraph::AccessTracker& lastAccesses);
static VkAccessFlags image_access_to_access_flags(VkPipelineStageFlags stageFlags,
ResourceAccess::Enum access);
static VkAccessFlags buffer_access_to_access_flags(VkPipelineStageFlags stageFlags,
ResourceAccess::Enum access, VkBufferUsageFlags usage);
// AccessTracker
// Tracks the most recent recorded access per image view / buffer, so the
// graph can decide whether a pass needs a barrier before touching a resource
// again.
class FrameGraph::AccessTracker {
public:
	// Images after a prior WRITE always need a barrier; otherwise only
	// layout transitions do.
	ImageResourceTracker::BarrierMode get_barrier_mode(const Memory::IntrusivePtr<ImageView>&
			pImageView) const {
		if (auto it = m_imageAccesses.find(pImageView); it != m_imageAccesses.end()
				&& (it->second & ResourceAccess::WRITE)) {
			return ImageResourceTracker::BarrierMode::ALWAYS;
		}
		return ImageResourceTracker::BarrierMode::TRANSITIONS_ONLY;
	}
	// Buffers have no layouts: barrier only after a prior WRITE.
	BufferResourceTracker::BarrierMode get_barrier_mode(const Memory::IntrusivePtr<Buffer>&
			pBuffer) const {
		if (auto it = m_bufferAccesses.find(pBuffer); it != m_bufferAccesses.end()
				&& (it->second & ResourceAccess::WRITE)) {
			return BufferResourceTracker::BarrierMode::ALWAYS;
		}
		return BufferResourceTracker::BarrierMode::NEVER;
	}
	// Record (overwrite) the latest access for an image view.
	void update_access(const Memory::IntrusivePtr<ImageView>& pImageView,
			ResourceAccess::Enum access) {
		m_imageAccesses.insert_or_assign(pImageView, access);
	}
	// Record (overwrite) the latest access for a buffer.
	void update_access(const Memory::IntrusivePtr<Buffer>& pBuffer,
			ResourceAccess::Enum access) {
		m_bufferAccesses.insert_or_assign(pBuffer, access);
	}
private:
	std::unordered_map<Memory::IntrusivePtr<ImageView>, ResourceAccess::Enum> m_imageAccesses;
	std::unordered_map<Memory::IntrusivePtr<Buffer>, ResourceAccess::Enum> m_bufferAccesses;
};
// RenderPassInterval
// An interval covers the half-open pass range [startIndex, endIndex) that is
// merged into one physical render pass.
FrameGraph::RenderPassInterval::RenderPassInterval(uint32_t startIndex, uint32_t endIndex)
	: m_startIndex(startIndex)
	, m_endIndex(endIndex)
	, m_depthAttachment{}
	, m_createInfo{} {}
// Folds `pass` into this interval as a new subpass, merging its depth, color
// and input attachments into the interval-wide render pass create info.
void FrameGraph::RenderPassInterval::register_pass(const FrameGraphPass& pass) {
	m_subpasses.emplace_back();
	auto& subpass = m_subpasses.back();
	if (pass.has_depth_attachment()) {
		auto access = pass.get_depth_stencil_info().access;
		// The first pass with a depth attachment decides the interval's depth
		// target and its clear/load behavior.
		if (!m_depthAttachment) {
			m_depthAttachment = pass.get_depth_stencil_resource();
			m_createInfo.depthAttachment = {
				.format = m_depthAttachment->get_image().get_format(),
				.samples = m_depthAttachment->get_image().get_sample_count()
			};
			if (pass.get_depth_stencil_info().clear) {
				m_createInfo.createFlags |= RenderPass::CREATE_FLAG_CLEAR_DEPTH_STENCIL;
			}
			else if (access & ResourceAccess::READ) {
				m_createInfo.createFlags |= RenderPass::CREATE_FLAG_LOAD_DEPTH_STENCIL;
			}
		}
		// The *last* registered pass decides whether depth is stored: a final
		// read-only pass clears the store flag again.
		if (access & ResourceAccess::WRITE) {
			m_createInfo.createFlags |= RenderPass::CREATE_FLAG_STORE_DEPTH_STENCIL;
		}
		else {
			m_createInfo.createFlags &= ~RenderPass::CREATE_FLAG_STORE_DEPTH_STENCIL;
		}
		// Per-subpass depth usage drives layout/dependency setup downstream.
		switch (access) {
		case ResourceAccess::READ:
			subpass.depthStencilUsage = RenderPass::DepthStencilUsage::READ;
			break;
		case ResourceAccess::WRITE:
			subpass.depthStencilUsage = RenderPass::DepthStencilUsage::WRITE;
			break;
		case ResourceAccess::READ_WRITE:
			subpass.depthStencilUsage = RenderPass::DepthStencilUsage::READ_WRITE;
			break;
		default:
			subpass.depthStencilUsage = RenderPass::DepthStencilUsage::NONE;
		}
	}
	// Color and input attachments share one registration path; input
	// attachments are treated as non-clearing reads.
	pass.for_each_color_attachment([&](const auto& pImageView, const auto& res) {
		register_color_attachment(pImageView, res.clear, res.access, subpass.colorAttachments,
				subpass.colorAttachmentCount);
	});
	pass.for_each_input_attachment([&](const auto& pImageView) {
		register_color_attachment(pImageView, false, ResourceAccess::READ,
				subpass.inputAttachments, subpass.inputAttachmentCount);
	});
}
// Extends the interval's half-open pass range by one more pass.
void FrameGraph::RenderPassInterval::increment_end_index() {
	++m_endIndex;
}
// Emits the pre-render-pass barriers for every attachment in this interval,
// based on the last recorded access to each image.
void FrameGraph::RenderPassInterval::emit_barriers(AccessTracker& lastAccesses,
		BarrierInfoCollection& barrierInfo) const {
	for (size_t i = 0; i < m_colorAttachments.size(); ++i) {
		auto& pImageView = m_colorAttachments[i];
		// Attachments that are loaded (rather than cleared) need their prior
		// contents made visible, hence the per-attachment load bit.
		bool loadAttachment = (m_createInfo.loadAttachmentMask >> i) & 1;
		// Swapchain attachments are tracked in the present layout.
		// NOTE(review): confirm the render pass itself performs the
		// color-attachment-to-present transition.
		auto layout = pImageView->is_swapchain_image() ? VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
				: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
		emit_barriers_for_image(lastAccesses, barrierInfo, pImageView, loadAttachment,
				VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, layout,
				VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
	}
	if (m_depthAttachment) {
		bool loadAttachment = m_createInfo.createFlags
				& RenderPass::CREATE_FLAG_LOAD_DEPTH_STENCIL;
		emit_barriers_for_image(lastAccesses, barrierInfo, m_depthAttachment, loadAttachment,
				VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
				| VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
				VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
				VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
				| VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
	}
}
// Finalizes this interval into a physical render pass + framebuffer pair and
// returns the data emit_graphics_commands() replays.
FrameGraph::RenderPassData FrameGraph::RenderPassInterval::build() {
	m_createInfo.pSubpasses = m_subpasses.data();
	m_createInfo.subpassCount = static_cast<uint8_t>(m_subpasses.size());
	auto renderPass = RenderPass::create(m_createInfo);
	// The depth attachment is appended last so the framebuffer's attachment
	// list has all color attachments first, then depth.
	if (m_depthAttachment) {
		m_colorAttachments.emplace_back(std::move(m_depthAttachment));
		m_depthAttachment = {};
	}
	// Query the extent before the attachment vector is moved away.
	// (The original also computed an `attachCount` local that was never used.)
	auto extent = m_colorAttachments.back()->get_image().get_extent();
	auto framebuffer = Framebuffer::create(*renderPass, std::move(m_colorAttachments),
			extent.width, extent.height);
	return {
		.renderPass = std::move(renderPass),
		.framebuffer = std::move(framebuffer),
		.clearValues = std::move(m_clearValues),
		.startIndex = m_startIndex,
		.endIndex = m_endIndex
	};
}
// Registers (or reuses) a color/input attachment slot for `pImageView` and
// records its index into the subpass attachment list. Clear/load/store mask
// bits are keyed by the attachment's slot index.
void FrameGraph::RenderPassInterval::register_color_attachment(
		const Memory::IntrusivePtr<ImageView>& pImageView, bool clear, ResourceAccess::Enum access,
		RenderPass::AttachmentIndex_T* attachIndices,
		RenderPass::AttachmentCount_T& attachCountInOut) {
	auto idx = get_color_attachment_index(pImageView);
	if (idx == RenderPass::INVALID_ATTACHMENT_INDEX) {
		// First time this image is seen in the interval: claim the next slot.
		idx = static_cast<RenderPass::AttachmentIndex_T>(m_createInfo.colorAttachmentCount);
		auto mask = static_cast<RenderPass::AttachmentBitMask_T>(1u << idx);
		if (clear) {
			m_createInfo.clearAttachmentMask |= mask;
		}
		else if (access & ResourceAccess::READ) {
			m_createInfo.loadAttachmentMask |= mask;
		}
		m_createInfo.colorAttachments[m_createInfo.colorAttachmentCount] = {
			.format = pImageView->get_image().get_format(),
			.samples = pImageView->get_image().get_sample_count()
		};
		m_colorAttachments.push_back(pImageView);
		++m_createInfo.colorAttachmentCount;
		if (pImageView->is_swapchain_image()) {
			m_createInfo.swapchainAttachmentMask |= mask;
		}
	}
	attachIndices[attachCountInOut++] = idx;
	// BUGFIX: key the store bit by the attachment's actual index. The original
	// computed the mask from m_colorAttachments.size(), which is wrong when an
	// already-registered attachment is referenced again by a later subpass.
	auto storeMask = static_cast<RenderPass::AttachmentBitMask_T>(1u << idx);
	if (access & ResourceAccess::WRITE) {
		m_createInfo.storeAttachmentMask |= storeMask;
	}
	else {
		m_createInfo.storeAttachmentMask &= ~storeMask;
	}
}
// Emits barriers (when required) for one attachment image and records its new
// layout/access state in both the image tracker and the access tracker.
void FrameGraph::RenderPassInterval::emit_barriers_for_image(AccessTracker& lastAccesses,
		BarrierInfoCollection& barrierInfo, const Memory::IntrusivePtr<ImageView>& pImageView,
		bool loadImage, VkPipelineStageFlags stageFlags, VkImageLayout layout,
		VkAccessFlags accessMask) const {
	auto barrierMode = lastAccesses.get_barrier_mode(pImageView);
	// Emit a real barrier when the tracker demands one, or when the pass
	// loads prior contents (which must be made visible). The trailing
	// !loadImage flag presumably marks the old contents as discardable —
	// NOTE(review): confirm against Image::update_subresources.
	if (barrierMode == ImageResourceTracker::BarrierMode::ALWAYS || loadImage) {
		pImageView->get_image().update_subresources(barrierInfo, {
			.stageFlags = stageFlags,
			.layout = layout,
			.accessMask = accessMask,
			.queueFamilyIndex = g_renderContext->get_graphics_queue_family(),
			.range = pImageView->get_subresource_range()
		}, barrierMode, !loadImage);
	}
	// NOTE(review): this second update with BarrierMode::NEVER appears to only
	// record the new subresource state without emitting another barrier —
	// confirm.
	pImageView->get_image().update_subresources(barrierInfo, {
		.stageFlags = stageFlags,
		.layout = layout,
		.accessMask = accessMask,
		.queueFamilyIndex = g_renderContext->get_graphics_queue_family(),
		.range = pImageView->get_subresource_range()
	}, ImageResourceTracker::BarrierMode::NEVER, false);
	lastAccesses.update_access(pImageView, ResourceAccess::READ_WRITE);
}
// First pass index covered by this interval.
uint32_t FrameGraph::RenderPassInterval::get_start_index() const {
	return m_startIndex;
}
// One past the last pass index covered by this interval.
uint32_t FrameGraph::RenderPassInterval::get_end_index() const {
	return m_endIndex;
}
// Returns the slot of `pImageView` among the registered color attachments, or
// INVALID_ATTACHMENT_INDEX when it has not been registered yet. Linear scan:
// attachment counts are tiny.
RenderPass::AttachmentIndex_T FrameGraph::RenderPassInterval::get_color_attachment_index(
		const Memory::IntrusivePtr<ImageView>& pImageView) const {
	RenderPass::AttachmentIndex_T slot = 0;
	for (; slot < m_createInfo.colorAttachmentCount; ++slot) {
		if (m_colorAttachments[slot] == pImageView) {
			return slot;
		}
	}
	return RenderPass::INVALID_ATTACHMENT_INDEX;
}
// FrameGraph
// Appends a new transfer pass. Passes are heap-allocated so the returned
// reference stays stable as the vector grows.
TransferPass& FrameGraph::add_transfer_pass() {
	auto& slot = m_transferPasses.emplace_back(std::make_unique<TransferPass>());
	return *slot;
}
// Appends a new graphics pass; same stability guarantee as above.
FrameGraphPass& FrameGraph::add_pass() {
	auto& slot = m_passes.emplace_back(std::make_unique<FrameGraphPass>());
	return *slot;
}
// Drops all recorded passes and built render pass state so the graph can be
// re-recorded from scratch.
void FrameGraph::clear() {
	m_passes.clear();
	m_transferPasses.clear();
	m_renderPassIntervals.clear();
	m_renderPasses.clear();
}
// Compiles the recorded passes into barriers, physical render passes and
// command buffer contents. Transfer work goes into `transferCmd`, graphics
// work into `graphicsCmd`; barriers are computed in the same order the
// commands execute (transfer first, then graphics).
void FrameGraph::build(CommandBuffer& graphicsCmd, CommandBuffer& transferCmd) {
	ZoneScopedN("Build Frame Graph");
	AccessTracker lastAccesses{};
	// One barrier collection per pass, filled during the build phases below.
	std::vector<BarrierInfoCollection> transferBarrierInfos(m_transferPasses.size());
	std::vector<BarrierInfoCollection> graphicsBarrierInfos(m_passes.size());
	merge_render_passes();
	build_transfer_barriers(lastAccesses, transferBarrierInfos);
	build_graphics_barriers(lastAccesses, graphicsBarrierInfos);
	build_physical_render_passes();
	emit_transfer_commands(transferCmd, transferBarrierInfos);
	emit_graphics_commands(graphicsCmd, graphicsBarrierInfos);
}
// Walks the recorded passes and folds compatible consecutive (or safely
// reorderable) render passes into multi-subpass intervals.
void FrameGraph::merge_render_passes() {
	ZoneScopedN("Merge render passes");
	// BUGFIX: the loop condition was `i < l - 1`; with an empty pass list the
	// unsigned `l - 1` wraps around and the loop reads out of bounds.
	// `i + 1 < l` is equivalent for non-empty lists and safe for l == 0.
	for (uint32_t i = 0, l = static_cast<uint32_t>(m_passes.size()); i + 1 < l; ++i) {
		auto& pA = *m_passes[i];
		if (!pA.is_render_pass()) {
			continue;
		}
		// Fast path: the directly following pass is mergeable with pA.
		if (m_passes[i + 1]->is_render_pass() && can_merge_render_passes(pA, *m_passes[i + 1])) {
			auto renderPassIndex = pA.get_render_pass_index();
			if (renderPassIndex == FrameGraphPass::INVALID_RENDER_PASS_INDEX) {
				// pA starts a new interval covering [i, i + 2).
				renderPassIndex = static_cast<uint32_t>(m_renderPassIntervals.size());
				pA.set_render_pass_index(renderPassIndex);
				m_renderPassIntervals.emplace_back(i, i + 2);
				m_renderPassIntervals.back().register_pass(pA);
			}
			else {
				// pA already belongs to an interval; extend it by one pass.
				m_renderPassIntervals[renderPassIndex].increment_end_index();
			}
			m_passes[i + 1]->set_render_pass_index(renderPassIndex);
			m_renderPassIntervals[renderPassIndex].register_pass(*m_passes[i + 1]);
			continue;
		}
		// Slow path: look further ahead for a mergeable render pass that can
		// be safely swapped next to pA.
		bool mergedPass = false;
		for (uint32_t j = i + 2; j < l; ++j) {
			auto& pB = *m_passes[j];
			if (!pB.is_render_pass()) {
				continue;
			}
			if (can_merge_render_passes(pA, pB) && can_swap_passes(i + 1, j)) {
				mergedPass = true;
				auto renderPassIndex = pA.get_render_pass_index();
				if (pA.get_render_pass_index() == FrameGraphPass::INVALID_RENDER_PASS_INDEX) {
					renderPassIndex = static_cast<uint32_t>(m_renderPassIntervals.size());
					pA.set_render_pass_index(renderPassIndex);
					m_renderPassIntervals.emplace_back(i, i + 2);
					m_renderPassIntervals.back().register_pass(pA);
				}
				else {
					m_renderPassIntervals[renderPassIndex].increment_end_index();
				}
				pB.set_render_pass_index(renderPassIndex);
				m_renderPassIntervals[renderPassIndex].register_pass(pB);
				std::swap(m_passes[i + 1], m_passes[j]);
				break;
			}
		}
		// A render pass that merged with nothing still needs its own
		// single-pass interval.
		if (!mergedPass
				&& pA.get_render_pass_index() == FrameGraphPass::INVALID_RENDER_PASS_INDEX) {
			pA.set_render_pass_index(static_cast<uint32_t>(m_renderPassIntervals.size()));
			m_renderPassIntervals.emplace_back(i, i + 1);
			m_renderPassIntervals.back().register_pass(pA);
		}
	}
	// The final pass is never visited as `pA` above; give it an interval if it
	// is a render pass that was not merged into an earlier one.
	if (!m_passes.empty() && m_passes.back()->is_render_pass()
			&& m_passes.back()->get_render_pass_index()
			== FrameGraphPass::INVALID_RENDER_PASS_INDEX) {
		m_passes.back()->set_render_pass_index(static_cast<uint32_t>(
				m_renderPassIntervals.size()));
		auto idx = static_cast<uint32_t>(m_passes.size() - 1);
		m_renderPassIntervals.emplace_back(idx, idx + 1);
		m_renderPassIntervals.back().register_pass(*m_passes.back());
	}
}
// Computes barriers for every graphics pass. Passes merged into a render pass
// interval contribute to the barrier collection of the interval's first pass,
// which is emitted before the render pass begins.
void FrameGraph::build_graphics_barriers(AccessTracker& lastAccesses,
		std::vector<BarrierInfoCollection>& barrierInfos) {
	for (uint32_t i = 0, l = static_cast<uint32_t>(m_passes.size()); i < l;) {
		auto& barrierInfo = barrierInfos[i];
		auto rpIdx = m_passes[i]->get_render_pass_index();
		if (rpIdx != FrameGraphPass::INVALID_RENDER_PASS_INDEX) {
			auto& interval = m_renderPassIntervals[rpIdx];
			// Attachment barriers for the whole interval first, then each
			// member pass adds its non-attachment barriers; `i` ends at the
			// interval's end index.
			interval.emit_barriers(lastAccesses, barrierInfo);
			for (; i < interval.get_end_index(); ++i) {
				emit_pass_barriers(*m_passes[i], barrierInfo, lastAccesses);
			}
		}
		else {
			emit_pass_barriers(*m_passes[i], barrierInfo, lastAccesses);
			++i;
		}
	}
}
// Computes barriers for each transfer pass, one collection per pass, in
// submission order.
void FrameGraph::build_transfer_barriers(AccessTracker& lastAccesses,
		std::vector<BarrierInfoCollection>& barrierInfos) {
	size_t slot = 0;
	for (auto& pPass : m_transferPasses) {
		emit_pass_barriers(*pPass, barrierInfos[slot++], lastAccesses);
	}
}
// Turns each merged interval into an actual RenderPass + Framebuffer pair.
void FrameGraph::build_physical_render_passes() {
	ZoneScopedN("Build Physical RenderPasses");
	m_renderPasses.reserve(m_renderPassIntervals.size());
	for (auto& interval : m_renderPassIntervals) {
		m_renderPasses.emplace_back(interval.build());
	}
}
// Replays the scheduled graphics work into `cmd`: for each pass, first
// emits the barriers prepared by build_graphics_barriers(), then either
// records the pass directly or begins the merged physical render pass and
// records each member pass as one subpass.
void FrameGraph::emit_graphics_commands(CommandBuffer& cmd,
		std::vector<BarrierInfoCollection>& barrierInfos) {
	ZoneScopedN("Emit Graphics Commands");
	ZN_TRACY_VK_ZONE(cmd, "All Frame");
	for (uint32_t i = 0, l = static_cast<uint32_t>(m_passes.size()); i < l;) {
		auto& barrierInfo = barrierInfos[i];
		auto rpIdx = m_passes[i]->get_render_pass_index();
		// Barriers recorded under this pass index (covers the whole interval
		// when this pass starts a merged render pass).
		barrierInfo.emit_barriers(cmd);
		if (rpIdx != FrameGraphPass::INVALID_RENDER_PASS_INDEX) {
			auto& rpData = m_renderPasses[rpIdx];
			cmd.begin_render_pass(*rpData.renderPass, *rpData.framebuffer,
					static_cast<uint32_t>(rpData.clearValues.size()), rpData.clearValues.data());
			for (; i < rpData.endIndex; ++i) {
				m_passes[i]->write_commands(cmd);
				// Advance to the next subpass between member passes, but not
				// after the last one.
				if (i != rpData.endIndex - 1) {
					cmd.next_subpass();
				}
			}
			cmd.end_render_pass();
		}
		else {
			// Standalone pass: no render-pass scope needed.
			m_passes[i]->write_commands(cmd);
			++i;
		}
	}
}
// Replays the transfer passes into `cmd`, emitting each pass's barriers
// immediately before its commands.
void FrameGraph::emit_transfer_commands(CommandBuffer& cmd,
		std::vector<BarrierInfoCollection>& barrierInfos) {
	size_t passIndex = 0;
	for (auto& pPass : m_transferPasses) {
		barrierInfos[passIndex].emit_barriers(cmd);
		pPass->write_commands(cmd);
		++passIndex;
	}
}
// Returns true when the passes at indices pI and pJ may trade places in the
// schedule: neither endpoint writes a resource the other touches, and no
// pass between them has a write hazard with either endpoint.
bool FrameGraph::can_swap_passes(uint32_t pI, uint32_t pJ) const {
	if (pI > pJ) {
		std::swap(pI, pJ);
	}
	auto& first = *m_passes[pI];
	auto& last = *m_passes[pJ];
	if (first.writes_resources_of_pass(last) || last.writes_resources_of_pass(first)) {
		return false;
	}
	for (auto mid = pI + 1; mid != pJ; ++mid) {
		auto& between = *m_passes[mid];
		bool hazard = first.writes_resources_of_pass(between)
				|| last.writes_resources_of_pass(between)
				|| between.writes_resources_of_pass(first)
				|| between.writes_resources_of_pass(last);
		if (hazard) {
			return false;
		}
	}
	return true;
}
// Decides whether pass pB may join pA's render pass as an extra subpass.
// Merging is rejected when:
//  - pA writes a non-attachment resource (texture/buffer) that pB uses,
//    which would require a barrier inside the render pass;
//  - pB clears an attachment of pA (the clear would discard pA's output);
//  - the two passes use different depth/stencil attachments (a render pass
//    carries at most one). When pA already belongs to an interval, every
//    pass in that interval is checked against pB.
bool FrameGraph::can_merge_render_passes(const FrameGraphPass& pA,
		const FrameGraphPass& pB) const {
	if (pA.writes_non_attachments_of_pass(pB)) {
		return false;
	}
	if (pB.clears_attachments_of_pass(pA)) {
		return false;
	}
	if (pA.get_render_pass_index() == FrameGraphPass::INVALID_RENDER_PASS_INDEX) {
		if (has_incompatible_depth_attachments(pA, pB)) {
			return false;
		}
	}
	else {
		auto& interval = m_renderPassIntervals[pA.get_render_pass_index()];
		for (auto i = interval.get_start_index(); i < interval.get_end_index(); ++i) {
			if (has_incompatible_depth_attachments(*m_passes[i], pB)) {
				return false;
			}
		}
	}
	return true;
}
// True when the pass at `index` belongs to a render-pass interval and is
// the first pass of that interval.
bool FrameGraph::is_first_render_pass(uint32_t index) const {
	auto rpIdx = m_passes[index]->get_render_pass_index();
	if (rpIdx == FrameGraphPass::INVALID_RENDER_PASS_INDEX) {
		return false;
	}
	return m_renderPassIntervals[rpIdx].get_start_index() == index;
}
// Two passes conflict when both use a depth/stencil attachment but not the
// same image; a single render pass can only carry one.
static bool has_incompatible_depth_attachments(const FrameGraphPass& pA,
		const FrameGraphPass& pB) {
	if (!pA.has_depth_attachment() || !pB.has_depth_attachment()) {
		return false;
	}
	return !pA.has_depth_attachment(pB.get_depth_stencil_resource());
}
// Records the image and buffer barriers one pass requires into
// `barrierInfo`, and updates `lastAccesses` so later passes can decide
// whether a barrier is needed at all (get_barrier_mode) and whether the
// previous contents may be discarded (the final `!(access & READ)` flag:
// a write-only access does not need the old data preserved).
static void emit_pass_barriers(const BaseFrameGraphPass& pass, BarrierInfoCollection& barrierInfo,
		FrameGraph::AccessTracker& lastAccesses) {
	pass.for_each_texture([&](const auto& pImageView, const auto& dep) {
		pImageView->get_image().update_subresources(barrierInfo, {
			.stageFlags = dep.stageFlags,
			.layout = dep.layout,
			.accessMask = image_access_to_access_flags(dep.stageFlags, dep.access),
			.queueFamilyIndex = g_renderContext->get_graphics_queue_family(),
			.range = pImageView->get_subresource_range()
		}, lastAccesses.get_barrier_mode(pImageView), !(dep.access & ResourceAccess::READ));
		lastAccesses.update_access(pImageView, dep.access);
	});
	pass.for_each_buffer([&](const auto& pBuffer, const auto& dep) {
		pBuffer->update_subresources(barrierInfo, {
			.stageFlags = dep.stageFlags,
			.accessMask = buffer_access_to_access_flags(dep.stageFlags, dep.access,
					pBuffer->get_usage_flags()),
			.offset = dep.offset,
			.size = dep.size,
			.queueFamilyIndex = g_renderContext->get_graphics_queue_family()
		}, lastAccesses.get_barrier_mode(pBuffer), !(dep.access & ResourceAccess::READ));
		lastAccesses.update_access(pBuffer, dep.access);
	});
}
// Maps a pipeline-stage mask plus the frame graph's coarse READ/WRITE
// access to the matching VkAccessFlags for an image resource.
static VkAccessFlags image_access_to_access_flags(VkPipelineStageFlags stageFlags,
		ResourceAccess::Enum access) {
	const bool reads = (access & ResourceAccess::READ) != 0;
	const bool writes = (access & ResourceAccess::WRITE) != 0;
	// Selects the read and/or write bit appropriate for one stage group.
	auto pick = [&](VkAccessFlags readBit, VkAccessFlags writeBit) -> VkAccessFlags {
		return (reads ? readBit : 0) | (writes ? writeBit : 0);
	};
	VkAccessFlags result = {};
	if (stageFlags & (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
			| VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)) {
		result |= pick(VK_ACCESS_SHADER_READ_BIT, VK_ACCESS_SHADER_WRITE_BIT);
	}
	if (stageFlags & (VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
			| VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)) {
		result |= pick(VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
				VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
	}
	if (stageFlags & VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT) {
		result |= pick(VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
	}
	if (stageFlags & VK_PIPELINE_STAGE_TRANSFER_BIT) {
		result |= pick(VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_TRANSFER_WRITE_BIT);
	}
	return result;
}
// Maps a pipeline-stage mask plus the coarse READ/WRITE access to the
// VkAccessFlags for a buffer; `usage` distinguishes index vs vertex reads
// at the vertex-input stage.
static VkAccessFlags buffer_access_to_access_flags(VkPipelineStageFlags stageFlags,
		ResourceAccess::Enum access, VkBufferUsageFlags usage) {
	const bool reads = (access & ResourceAccess::READ) != 0;
	const bool writes = (access & ResourceAccess::WRITE) != 0;
	VkAccessFlags result = {};
	if ((stageFlags & VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT) && reads) {
		result |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
	}
	if (stageFlags & VK_PIPELINE_STAGE_VERTEX_INPUT_BIT) {
		if ((usage & VK_BUFFER_USAGE_INDEX_BUFFER_BIT) && reads) {
			result |= VK_ACCESS_INDEX_READ_BIT;
		}
		if ((usage & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) && reads) {
			result |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
		}
	}
	if (stageFlags & (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
			| VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)) {
		if (reads) {
			result |= VK_ACCESS_SHADER_READ_BIT;
		}
		if (writes) {
			result |= VK_ACCESS_SHADER_WRITE_BIT;
		}
	}
	if (stageFlags & VK_PIPELINE_STAGE_TRANSFER_BIT) {
		if (reads) {
			result |= VK_ACCESS_TRANSFER_READ_BIT;
		}
		if (writes) {
			result |= VK_ACCESS_TRANSFER_WRITE_BIT;
		}
	}
	return result;
}
|
whupdup/frame
|
real/graphics/frame_graph.cpp
|
C++
|
gpl-3.0
| 20,520
|
#pragma once
#include <graphics/framebuffer.hpp>
#include <graphics/frame_graph_pass.hpp>
#include <graphics/render_pass.hpp>
#include <graphics/transfer_pass.hpp>
#include <memory>
namespace ZN::GFX {
class BarrierInfoCollection;
class CommandBuffer;
// Per-frame dependency graph of graphics and transfer passes. Each frame,
// passes are registered, compatible render passes are merged into subpass
// intervals, barriers are derived from the declared resource accesses, and
// build() records everything into the supplied command buffers.
class FrameGraph final {
	public:
		class AccessTracker;
		// A built physical render pass plus the [startIndex, endIndex)
		// range of logical passes it covers.
		struct RenderPassData {
			Memory::IntrusivePtr<RenderPass> renderPass;
			Memory::IntrusivePtr<Framebuffer> framebuffer;
			std::vector<VkClearValue> clearValues;
			uint32_t startIndex;
			uint32_t endIndex;
		};
		// A contiguous [start, end) range of passes merged into a single
		// render pass; accumulates attachments and subpass descriptions as
		// passes register themselves.
		class RenderPassInterval final {
			public:
				explicit RenderPassInterval(uint32_t startIndex, uint32_t endIndex);
				void register_pass(const FrameGraphPass&);
				void increment_end_index();
				void emit_barriers(AccessTracker&, BarrierInfoCollection&) const;
				// Creates the physical render pass + framebuffer.
				RenderPassData build();
				uint32_t get_start_index() const;
				uint32_t get_end_index() const;
				const RenderPass::CreateInfo& get_create_info() const { return m_createInfo; }
				RenderPass::AttachmentIndex_T get_color_attachment_index(
						const Memory::IntrusivePtr<ImageView>&) const;
			private:
				uint32_t m_startIndex;
				uint32_t m_endIndex;
				std::vector<Memory::IntrusivePtr<ImageView>> m_colorAttachments;
				Memory::IntrusivePtr<ImageView> m_depthAttachment;
				std::vector<RenderPass::SubpassInfo> m_subpasses;
				RenderPass::CreateInfo m_createInfo;
				std::vector<VkClearValue> m_clearValues;
				void register_color_attachment(const Memory::IntrusivePtr<ImageView>&, bool clear,
						ResourceAccess::Enum access, RenderPass::AttachmentIndex_T* attachIndices,
						RenderPass::AttachmentCount_T& attachCountInOut);
				void emit_barriers_for_image(AccessTracker&, BarrierInfoCollection&,
						const Memory::IntrusivePtr<ImageView>&, bool loadImage,
						VkPipelineStageFlags, VkImageLayout, VkAccessFlags) const;
		};
		explicit FrameGraph() = default;
		NULL_COPY_AND_ASSIGN(FrameGraph);
		TransferPass& add_transfer_pass();
		FrameGraphPass& add_pass();
		// Drops all passes/intervals so the graph can be rebuilt next frame.
		void clear();
		// Compiles the graph and records commands into both queues.
		void build(CommandBuffer& graphicsCmd, CommandBuffer& uploadCmd);
	private:
		std::vector<std::unique_ptr<FrameGraphPass>> m_passes;
		std::vector<std::unique_ptr<TransferPass>> m_transferPasses;
		std::vector<RenderPassInterval> m_renderPassIntervals;
		std::vector<RenderPassData> m_renderPasses;
		void build_transfer_barriers(AccessTracker&, std::vector<BarrierInfoCollection>&);
		void merge_render_passes();
		void build_graphics_barriers(AccessTracker&, std::vector<BarrierInfoCollection>&);
		void build_physical_render_passes();
		void emit_transfer_commands(CommandBuffer&, std::vector<BarrierInfoCollection>&);
		void emit_graphics_commands(CommandBuffer&, std::vector<BarrierInfoCollection>&);
		bool can_swap_passes(uint32_t pI, uint32_t pJ) const;
		bool can_merge_render_passes(const FrameGraphPass& pA, const FrameGraphPass& pB) const;
		bool is_first_render_pass(uint32_t) const;
};
}
|
whupdup/frame
|
real/graphics/frame_graph.hpp
|
C++
|
gpl-3.0
| 2,931
|
#include "graphics/frame_graph_pass.hpp"
#include <graphics/frame_graph.hpp>
using namespace ZN;
using namespace ZN::GFX;
// FrameGraphPass
// Registers `imageView` as a (non-cleared) color attachment of this pass.
// Returns *this for chaining.
FrameGraphPass& FrameGraphPass::add_color_attachment(ImageView& imageView,
		ResourceAccess::Enum access) {
	AttachmentDependency dep{};
	dep.access = access;
	dep.clear = false;
	m_colorAttachments.emplace(imageView.reference_from_this(), dep);
	return *this;
}
// Registers `imageView` as a color attachment that is cleared to
// `clearValue` when the render pass begins. Returns *this for chaining.
FrameGraphPass& FrameGraphPass::add_cleared_color_attachment(ImageView& imageView,
		ResourceAccess::Enum access, VkClearColorValue clearValue) {
	AttachmentDependency dep{};
	dep.clearValue.color = clearValue;
	dep.access = access;
	dep.clear = true;
	m_colorAttachments.emplace(imageView.reference_from_this(), dep);
	return *this;
}
// Makes `imageView` the pass's depth/stencil attachment (loaded, not
// cleared). Returns *this for chaining.
FrameGraphPass& FrameGraphPass::add_depth_stencil_attachment(ImageView& imageView,
		ResourceAccess::Enum access) {
	m_depthStencilInfo.access = access;
	m_depthStencilInfo.clear = false;
	m_depthStencilImageView = imageView.reference_from_this();
	return *this;
}
// Makes `imageView` the depth/stencil attachment and marks it to be cleared
// to `clearValue` when the render pass begins. Returns *this for chaining.
FrameGraphPass& FrameGraphPass::add_cleared_depth_stencil_attachment(ImageView& imageView,
		ResourceAccess::Enum access, VkClearDepthStencilValue clearValue) {
	m_depthStencilInfo.access = access;
	m_depthStencilInfo.clear = true;
	m_depthStencilInfo.clearValue.depthStencil = clearValue;
	m_depthStencilImageView = imageView.reference_from_this();
	return *this;
}
// Registers `imageView` as a subpass input attachment. Returns *this.
FrameGraphPass& FrameGraphPass::add_input_attachment(ImageView& imageView) {
	auto ref = imageView.reference_from_this();
	m_inputAttachments.emplace(std::move(ref));
	return *this;
}
// A pass needs a VkRenderPass iff it uses any attachment.
bool FrameGraphPass::is_render_pass() const {
	if (m_depthStencilImageView) {
		return true;
	}
	return !m_colorAttachments.empty() || !m_inputAttachments.empty();
}
// True when this pass writes any image or buffer that `other` touches in
// any role (attachment, sampled texture, or buffer dependency).
bool FrameGraphPass::writes_resources_of_pass(const FrameGraphPass& other) const {
	bool writes = false;
	// FIXME: IterationDecision — the for_each helpers cannot early-exit, so
	// the scan continues even after a conflict has been found.
	other.for_each_touched_image_resource([&](auto& img) {
		if (writes_image(img)) {
			writes = true;
		}
	});
	other.for_each_buffer([&](auto& buf, auto&) {
		if (writes_buffer(buf)) {
			writes = true;
		}
	});
	return writes;
}
// True when this pass writes a resource that `other` uses as a plain
// texture or buffer (i.e. not as an attachment). Used to reject render-pass
// merging, since such a hazard would need a barrier mid-render-pass.
bool FrameGraphPass::writes_non_attachments_of_pass(const FrameGraphPass& other) const {
	bool writes = false;
	// FIXME: IterationDecision — for_each cannot early-exit from inside.
	other.for_each_texture([&](auto& imageView, auto&) {
		if (writes_image(imageView)) {
			writes = true;
		}
	});
	if (writes) {
		return true;
	}
	other.for_each_buffer([&](auto& buffer, auto&) {
		if (writes_buffer(buffer)) {
			writes = true;
		}
	});
	return writes;
}
// True when any attachment this pass clears is also an attachment of
// `other` (the clear would discard `other`'s rendering).
bool FrameGraphPass::clears_attachments_of_pass(const FrameGraphPass& other) const {
	bool clearsDepth = m_depthStencilImageView && m_depthStencilInfo.clear;
	if (clearsDepth && other.has_attachment(m_depthStencilImageView)) {
		return true;
	}
	for (auto& [attachment, dep] : m_colorAttachments) {
		if (!dep.clear) {
			continue;
		}
		if (other.has_attachment(attachment)) {
			return true;
		}
	}
	return false;
}
// Extends the base-class write check with attachment writes: a written
// depth/stencil attachment or any color attachment counts as a write.
bool FrameGraphPass::writes_image_internal(const Memory::IntrusivePtr<ImageView>& img) const {
	if (BaseFrameGraphPass::writes_image_internal(img)) {
		return true;
	}
	bool writesDepth = m_depthStencilImageView.get() == img.get()
			&& (m_depthStencilInfo.access & ResourceAccess::WRITE);
	if (writesDepth) {
		return true;
	}
	return m_colorAttachments.find(img) != m_colorAttachments.end();
}
// True when `img` is this pass's depth/stencil attachment or one of its
// color attachments.
bool FrameGraphPass::has_attachment(const Memory::IntrusivePtr<ImageView>& img) const {
	return has_depth_attachment(img)
			|| m_colorAttachments.find(img) != m_colorAttachments.end();
}
// True when `img` is exactly this pass's depth/stencil attachment.
// NOTE(review): a null `img` compares equal when the pass has no depth
// attachment (both pointers null) — confirm callers never pass null.
bool FrameGraphPass::has_depth_attachment(const Memory::IntrusivePtr<ImageView>& img) const {
	return m_depthStencilImageView.get() == img.get();
}
// True when the pass has any depth/stencil attachment.
bool FrameGraphPass::has_depth_attachment() const {
	return m_depthStencilImageView != nullptr;
}
// Assigns this pass to a render-pass interval (set during merging).
void FrameGraphPass::set_render_pass_index(uint32_t renderPassIndex) {
	m_renderPassIndex = renderPassIndex;
}
// The pass's depth/stencil attachment, or null if it has none.
Memory::IntrusivePtr<ImageView> FrameGraphPass::get_depth_stencil_resource() const {
	return m_depthStencilImageView;
}
// Access/clear info for the depth attachment; only meaningful when one is set.
const AttachmentDependency& FrameGraphPass::get_depth_stencil_info() const {
	return m_depthStencilInfo;
}
// Interval index, or INVALID_RENDER_PASS_INDEX before merging.
uint32_t FrameGraphPass::get_render_pass_index() const {
	return m_renderPassIndex;
}
// Invokes `func` once per image the pass touches in any role: attachments
// first, then sampled textures (the per-texture dependency is discarded).
template <typename Functor>
void FrameGraphPass::for_each_touched_image_resource(Functor&& func) const {
	for_each_touched_attachment_resource([&](auto& imageView) {
		func(imageView);
	});
	for_each_texture([&](auto& imageView, auto&) {
		func(imageView);
	});
}
// Invokes `func` once per attachment image: depth/stencil (if any), then
// color attachments, then input attachments.
template <typename Functor>
void FrameGraphPass::for_each_touched_attachment_resource(Functor&& func) const {
	if (m_depthStencilImageView) {
		func(m_depthStencilImageView);
	}
	for (auto& [imageView, _] : m_colorAttachments) {
		func(imageView);
	}
	for (auto& imageView : m_inputAttachments) {
		func(imageView);
	}
}
|
whupdup/frame
|
real/graphics/frame_graph_pass.cpp
|
C++
|
gpl-3.0
| 4,805
|
#pragma once
#include <graphics/base_frame_graph_pass.hpp>
#include <vector>
#include <unordered_set>
namespace ZN::GFX {
// A logical graphics pass. On top of the textures/buffers tracked by the
// mixin base, it declares color, depth/stencil, and input attachments,
// which drive render-pass merging and barrier generation in FrameGraph.
class FrameGraphPass final : public FrameGraphPassMixin<FrameGraphPass> {
	public:
		// Sentinel: pass not assigned to a render-pass interval yet.
		static constexpr const uint32_t INVALID_RENDER_PASS_INDEX = ~0u;
		explicit FrameGraphPass() = default;
		NULL_COPY_AND_ASSIGN(FrameGraphPass);
		// Attachment registration; all return *this for chaining.
		FrameGraphPass& add_color_attachment(ImageView& imageView, ResourceAccess::Enum access);
		FrameGraphPass& add_cleared_color_attachment(ImageView& imageView,
				ResourceAccess::Enum access, VkClearColorValue clearValue);
		FrameGraphPass& add_depth_stencil_attachment(ImageView& imageView,
				ResourceAccess::Enum access);
		FrameGraphPass& add_cleared_depth_stencil_attachment(ImageView& imageView,
				ResourceAccess::Enum access, VkClearDepthStencilValue clearValue);
		FrameGraphPass& add_input_attachment(ImageView& imageView);
		// True when the pass uses any attachment (needs a VkRenderPass).
		bool is_render_pass() const;
		// Hazard queries used by FrameGraph scheduling/merging.
		bool writes_resources_of_pass(const FrameGraphPass& other) const;
		bool writes_non_attachments_of_pass(const FrameGraphPass& other) const;
		bool clears_attachments_of_pass(const FrameGraphPass& other) const;
		bool has_attachment(const Memory::IntrusivePtr<ImageView>& img) const;
		bool has_depth_attachment(const Memory::IntrusivePtr<ImageView>& img) const;
		bool has_depth_attachment() const;
		Memory::IntrusivePtr<ImageView> get_depth_stencil_resource() const;
		const AttachmentDependency& get_depth_stencil_info() const;
		void set_render_pass_index(uint32_t renderPassIndex);
		uint32_t get_render_pass_index() const;
		template <typename Functor>
		void for_each_color_attachment(Functor&& func) const {
			for (auto& [imageView, r] : m_colorAttachments) {
				func(imageView, r);
			}
		}
		template <typename Functor>
		void for_each_input_attachment(Functor&& func) const {
			for (auto& imageView : m_inputAttachments) {
				func(imageView);
			}
		}
		// Extends the base write check with attachment writes.
		bool writes_image_internal(const Memory::IntrusivePtr<ImageView>& img) const;
	private:
		std::unordered_map<Memory::IntrusivePtr<ImageView>, AttachmentDependency>
				m_colorAttachments;
		std::unordered_set<Memory::IntrusivePtr<ImageView>> m_inputAttachments;
		Memory::IntrusivePtr<ImageView> m_depthStencilImageView = {};
		AttachmentDependency m_depthStencilInfo;
		uint32_t m_renderPassIndex = INVALID_RENDER_PASS_INDEX;
		template <typename Functor>
		void for_each_touched_image_resource(Functor&& func) const;
		template <typename Functor>
		void for_each_touched_attachment_resource(Functor&& func) const;
		template <typename Functor>
		void for_each_touched_buffer_resource(Functor&& func) const;
};
}
|
whupdup/frame
|
real/graphics/frame_graph_pass.hpp
|
C++
|
gpl-3.0
| 2,625
|
#include "framebuffer.hpp"
#include <core/hash_builder.hpp>
#include <core/scoped_lock.hpp>
#include <graphics/render_context.hpp>
#include <graphics/render_pass.hpp>
#include <Tracy.hpp>
#include <unordered_map>
using namespace ZN;
using namespace ZN::GFX;
namespace ZN::GFX {
// Cache key for a framebuffer: the render pass's unique ID plus the unique
// IDs of its attachments, in order.
struct FramebufferKey {
	uint64_t renderPass;
	uint64_t attachmentIDs[RenderPass::MAX_COLOR_ATTACHMENTS + 1];
	RenderPass::AttachmentCount_T attachmentCount;
	// NOTE(review): assumes attachments.size() <= MAX_COLOR_ATTACHMENTS + 1;
	// a larger vector would overflow attachmentIDs — confirm that callers
	// enforce the limit.
	explicit FramebufferKey(uint64_t renderPassID,
			const std::vector<IntrusivePtr<ImageView>>& attachments)
			: renderPass(renderPassID)
			, attachmentCount(static_cast<RenderPass::AttachmentCount_T>(attachments.size())) {
		for (size_t i = 0; i < attachments.size(); ++i) {
			attachmentIDs[i] = attachments[i]->get_unique_id();
		}
	}
	DEFAULT_COPY_AND_ASSIGN(FramebufferKey);
	bool operator==(const FramebufferKey& other) const {
		if (renderPass != other.renderPass || attachmentCount != other.attachmentCount) {
			return false;
		}
		for (RenderPass::AttachmentCount_T i = 0; i < attachmentCount; ++i) {
			if (attachmentIDs[i] != other.attachmentIDs[i]) {
				return false;
			}
		}
		return true;
	}
};
// Hashes exactly the fields operator== compares.
struct FramebufferKeyHash {
	uint64_t operator()(const FramebufferKey& key) const {
		HashBuilder hb{};
		hb.add_uint64(key.renderPass);
		hb.add_uint32(static_cast<uint32_t>(key.attachmentCount));
		for (RenderPass::AttachmentCount_T i = 0; i < key.attachmentCount; ++i) {
			hb.add_uint64(key.attachmentIDs[i]);
		}
		return hb.get();
	}
};
}
// Live-framebuffer count, reported to Tracy (ctor increments, dtor decrements).
static int64_t g_counter = 0;
// Cache of live framebuffers keyed by (render pass, attachment IDs).
// Holds raw pointers; entries are removed by ~Framebuffer.
static std::unordered_map<FramebufferKey, Framebuffer*, FramebufferKeyHash> g_framebufferCache{};
// Guards g_framebufferCache (shared between create() and the destructor).
static IntrusivePtr<Scheduler::Mutex> g_mutex = Scheduler::Mutex::create();
static VkFramebuffer framebuffer_create(VkRenderPass renderPass,
		const std::vector<IntrusivePtr<ImageView>>& attachments, uint32_t width,
		uint32_t height);
// Returns the cached framebuffer for (renderPass, attachments) or creates
// and caches a new one. Returns nullptr if Vulkan creation fails.
// NOTE(review): the lock is dropped during creation, so two threads racing
// on the same key can each create a framebuffer; the loser's emplace is a
// no-op and its destructor will later erase the winner's cache entry by
// key — confirm creation is serialized, or re-check the cache after
// re-locking.
IntrusivePtr<Framebuffer> Framebuffer::create(RenderPass& renderPass,
		std::vector<IntrusivePtr<ImageView>> attachments, uint32_t width,
		uint32_t height) {
	FramebufferKey key(renderPass.get_unique_id(), attachments);
	ScopedLock lock(*g_mutex);
	if (auto it = g_framebufferCache.find(key); it != g_framebufferCache.end()) {
		return it->second->reference_from_this();
	}
	else {
		// Unlock while talking to Vulkan; creation can be slow.
		lock.unlock();
		ZoneScopedN("Create Framebuffer");
		auto framebuffer = framebuffer_create(renderPass, attachments, width, height);
		if (framebuffer != VK_NULL_HANDLE) {
			auto result = Memory::IntrusivePtr(new Framebuffer(framebuffer, std::move(attachments),
					renderPass.get_unique_id(), width, height));
			lock.lock();
			g_framebufferCache.emplace(std::make_pair(std::move(key), result.get()));
			lock.unlock();
			return result;
		}
	}
	return nullptr;
}
// Takes ownership of a successfully created VkFramebuffer and keeps the
// attachment views alive for the framebuffer's lifetime.
Framebuffer::Framebuffer(VkFramebuffer framebuffer,
		std::vector<IntrusivePtr<ImageView>> imageViews, uint64_t renderPassID,
		uint32_t width, uint32_t height)
	: m_framebuffer(framebuffer)
	, m_imageViews(std::move(imageViews))
	, m_renderPassID(renderPassID)
	, m_width(width)
	, m_height(height) {
	// Tracy live-object accounting (decremented in the destructor).
	// NOTE(review): g_counter is touched without holding g_mutex — confirm
	// concurrent creation is acceptable for this stat.
	++g_counter;
	TracyPlot("Framebuffers", g_counter);
}
// Queues destruction of the VkFramebuffer (deferred via the render context
// so in-flight frames finish first) and removes the cache entry.
Framebuffer::~Framebuffer() {
	--g_counter;
	TracyPlot("Framebuffers", g_counter);
	if (m_framebuffer != VK_NULL_HANDLE) {
		// Capture the raw handle by value; `this` is gone when it runs.
		g_renderContext->queue_delete([framebuffer=this->m_framebuffer] {
			vkDestroyFramebuffer(g_renderContext->get_device(), framebuffer, nullptr);
		});
	}
	// NOTE(review): erases by key, not by pointer — if two live framebuffers
	// ever share a key, destroying one evicts the other's cache entry.
	ScopedLock lock(*g_mutex);
	g_framebufferCache.erase(FramebufferKey{m_renderPassID, m_imageViews});
}
// Implicit handle conversion for direct use in Vulkan calls.
Framebuffer::operator VkFramebuffer() const {
	return m_framebuffer;
}
VkFramebuffer Framebuffer::get_framebuffer() const {
	return m_framebuffer;
}
// Attachment views, in the order given to create().
const std::vector<IntrusivePtr<ImageView>>& Framebuffer::get_image_views() const {
	return m_imageViews;
}
uint32_t Framebuffer::get_width() const {
	return m_width;
}
uint32_t Framebuffer::get_height() const {
	return m_height;
}
// Thin vkCreateFramebuffer wrapper: flattens the attachment views into a
// fixed-size handle array and returns VK_NULL_HANDLE on failure.
// NOTE(review): assumes attachments.size() <= MAX_COLOR_ATTACHMENTS + 1;
// more would overflow attachViews — confirm callers enforce the limit.
static VkFramebuffer framebuffer_create(VkRenderPass renderPass,
		const std::vector<IntrusivePtr<ImageView>>& attachments, uint32_t width, uint32_t height) {
	VkImageView attachViews[RenderPass::MAX_COLOR_ATTACHMENTS + 1] = {};
	for (size_t i = 0; i < attachments.size(); ++i) {
		attachViews[i] = *attachments[i];
	}
	VkFramebuffer framebuffer{};
	VkFramebufferCreateInfo createInfo{
		.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
		.renderPass = renderPass,
		.attachmentCount = static_cast<uint32_t>(attachments.size()),
		.pAttachments = attachViews,
		.width = width,
		.height = height,
		.layers = 1
	};
	if (vkCreateFramebuffer(g_renderContext->get_device(), &createInfo, nullptr, &framebuffer)
			== VK_SUCCESS) {
		return framebuffer;
	}
	return VK_NULL_HANDLE;
}
|
whupdup/frame
|
real/graphics/framebuffer.cpp
|
C++
|
gpl-3.0
| 4,663
|
#pragma once
#include <graphics/image_view.hpp>
#include <vector>
namespace ZN::GFX {
class RenderPass;
// Ref-counted wrapper around a VkFramebuffer. Instances are deduplicated
// in a global cache keyed by (render pass, attachments); see create().
class Framebuffer final : public Memory::ThreadSafeIntrusivePtrEnabled<Framebuffer> {
	public:
		// Returns a cached or newly created framebuffer; nullptr on failure.
		static Memory::IntrusivePtr<Framebuffer> create(RenderPass&,
				std::vector<Memory::IntrusivePtr<ImageView>> attachments, uint32_t width,
				uint32_t height);
		~Framebuffer();
		NULL_COPY_AND_ASSIGN(Framebuffer);
		operator VkFramebuffer() const;
		VkFramebuffer get_framebuffer() const;
		const std::vector<Memory::IntrusivePtr<ImageView>>& get_image_views() const;
		uint32_t get_width() const;
		uint32_t get_height() const;
	private:
		VkFramebuffer m_framebuffer;
		// Keeps the attachment views alive as long as the framebuffer.
		std::vector<Memory::IntrusivePtr<ImageView>> m_imageViews;
		uint64_t m_renderPassID;
		uint32_t m_width;
		uint32_t m_height;
		explicit Framebuffer(VkFramebuffer, std::vector<Memory::IntrusivePtr<ImageView>>,
				uint64_t renderPassID, uint32_t width, uint32_t height);
};
}
|
whupdup/frame
|
real/graphics/framebuffer.hpp
|
C++
|
gpl-3.0
| 966
|
#pragma once
#include <cstddef>
// Forward declarations for the graphics module, so headers can refer to
// these types without pulling in their full definitions.
namespace ZN::GFX {
class Buffer;
class CommandBuffer;
class CubeMap;
class Font;
class FontFamily;
class Framebuffer;
class Image;
class ImageView;
class RenderContext;
class RenderPass;
class Texture;
// Number of frames the CPU may record ahead of the GPU.
constexpr const size_t FRAMES_IN_FLIGHT = 2;
}
|
whupdup/frame
|
real/graphics/graphics_fwd.hpp
|
C++
|
gpl-3.0
| 288
|
#include "graphics_pipeline.hpp"
#include <core/hash_builder.hpp>
#include <graphics/render_context.hpp>
#include <graphics/render_pass.hpp>
#include <Tracy.hpp>
using namespace ZN;
using namespace ZN::GFX;
static int64_t g_counter = 0;
static VkPipeline create_pipeline(const GraphicsPipelineTemplate& tmpl,
const RenderPass& renderPass, uint32_t subpassIndex);
// PipelineKey
// Keys are equal when both the render-pass ID and subpass index match.
bool GraphicsPipeline::PipelineKey::operator==(const PipelineKey& other) const {
	if (renderPass != other.renderPass) {
		return false;
	}
	return subpassIndex == other.subpassIndex;
}
// Combines the render-pass ID and subpass index into one hash value.
size_t GraphicsPipeline::PipelineKeyHash::operator()(const PipelineKey& key) const {
	HashBuilder builder{};
	builder.add_uint64(key.renderPass);
	builder.add_uint32(key.subpassIndex);
	return builder.get();
}
// GraphicsPipeline
// Wraps the template in a new pipeline object with an empty cache.
Memory::IntrusivePtr<GraphicsPipeline> GraphicsPipeline::create(
		GraphicsPipelineTemplate&& pipelineTemplate) {
	auto* raw = new GraphicsPipeline(std::move(pipelineTemplate));
	return Memory::IntrusivePtr(raw);
}
// Takes ownership of the pipeline description; VkPipelines are created
// lazily per (render pass, subpass) in get_pipeline().
GraphicsPipeline::GraphicsPipeline(GraphicsPipelineTemplate&& pipelineTemplate)
	: m_template(std::move(pipelineTemplate)) {}
// Queues destruction of every cached VkPipeline and updates the Tracy
// counter. Deletion is deferred via the render context so frames still in
// flight can finish using the pipelines.
GraphicsPipeline::~GraphicsPipeline() {
	g_counter -= m_pipelines.size();
	TracyPlot("GraphicsPipelines", g_counter);
	for (auto& [_, pipeline] : m_pipelines) {
		// Copy the handle into the lambda; `this` is gone when it runs.
		g_renderContext->queue_delete([pipelineIn=pipeline] {
			vkDestroyPipeline(g_renderContext->get_device(), pipelineIn, nullptr);
		});
	}
}
// Returns the cached VkPipeline for (renderPass, subpass), creating it on
// first request.
// NOTE(review): a failed creation caches VK_NULL_HANDLE (and still bumps
// the Tracy counter), so failures are never retried — confirm intended.
// NOTE(review): the map is not synchronized; confirm callers only use this
// from one thread at a time.
VkPipeline GraphicsPipeline::get_pipeline(const RenderPass& renderPass, uint32_t subpassIndex) {
	PipelineKey key{renderPass.get_unique_id(), subpassIndex};
	if (auto it = m_pipelines.find(key); it != m_pipelines.end()) {
		return it->second;
	}
	else {
		++g_counter;
		TracyPlot("GraphicsPipelines", g_counter);
		auto pipeline = create_pipeline(m_template, renderPass, subpassIndex);
		m_pipelines.emplace(std::make_pair(std::move(key), pipeline));
		return pipeline;
	}
}
// The layout comes from the shader program and is shared by all cached
// pipelines of this object.
VkPipelineLayout GraphicsPipeline::get_layout() const {
	return m_template.program->get_pipeline_layout();
}
// Builds a VkPipeline from the template for (renderPass, subpassIndex).
// Viewport and scissor are dynamic state; topology is fixed to triangle
// lists; a single color blend attachment is assumed. Returns
// VK_NULL_HANDLE on failure.
// NOTE(review): tmpl.specializationInfos is never attached to the shader
// stages (pSpecializationInfo stays null) — confirm whether specialization
// constants are expected to take effect here.
static VkPipeline create_pipeline(const GraphicsPipelineTemplate& tmpl,
		const RenderPass& renderPass, uint32_t subpassIndex) {
	ZoneScopedN("Create Graphics Pipeline");
	VkPipeline pipeline;
	// One stage entry per shader module of the program; entry point "main".
	std::vector<VkPipelineShaderStageCreateInfo> stages(tmpl.program->get_num_modules());
	for (uint32_t i = 0; i < tmpl.program->get_num_modules(); ++i) {
		auto& stage = stages[i];
		stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
		stage.stage = tmpl.program->get_stage_flags()[i];
		stage.module = tmpl.program->get_shader_modules()[i];
		stage.pName = "main";
	}
	// Vertex layout comes from the (owned) arrays copied into the template.
	VkPipelineVertexInputStateCreateInfo vertexInput {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
		.vertexBindingDescriptionCount = tmpl.bindingDescriptionCount,
		.pVertexBindingDescriptions = tmpl.bindingDescriptions.get(),
		.vertexAttributeDescriptionCount = tmpl.attributeDescriptionCount,
		.pVertexAttributeDescriptions = tmpl.attributeDescriptions.get(),
	};
	VkPipelineInputAssemblyStateCreateInfo inputAssembly {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
		.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
	};
	// Counts only; the actual viewport/scissor are dynamic state.
	VkPipelineViewportStateCreateInfo viewportState {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
		.viewportCount = 1,
		.scissorCount = 1,
	};
	VkPipelineRasterizationStateCreateInfo rasterizer {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
		.polygonMode = VK_POLYGON_MODE_FILL,
		.cullMode = tmpl.cullMode,
		.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
		.lineWidth = 1.f,
	};
	VkPipelineMultisampleStateCreateInfo multisampling {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
		.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT, // FIXME: get from render pass
	};
	// Single blend attachment configured from the template's blend flags.
	VkPipelineColorBlendAttachmentState colorBlendAttachment {
		.blendEnable = (tmpl.flags & GraphicsPipelineTemplate::FLAG_BLEND_ENABLED)
				? VK_TRUE : VK_FALSE,
		.srcColorBlendFactor = tmpl.srcColorBlendFactor,
		.dstColorBlendFactor = tmpl.dstColorBlendFactor,
		.colorBlendOp = tmpl.colorBlendOp,
		.srcAlphaBlendFactor = tmpl.srcAlphaBlendFactor,
		.dstAlphaBlendFactor = tmpl.dstAlphaBlendFactor,
		.alphaBlendOp = tmpl.alphaBlendOp,
		.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT
				| VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
	};
	VkPipelineColorBlendStateCreateInfo colorBlendState {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
		.logicOpEnable = VK_FALSE,
		.attachmentCount = 1,
		.pAttachments = &colorBlendAttachment
	};
	VkPipelineDepthStencilStateCreateInfo depthStencil {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
		.depthTestEnable = (tmpl.flags & GraphicsPipelineTemplate::FLAG_DEPTH_TEST_ENABLED)
				? VK_TRUE : VK_FALSE,
		.depthWriteEnable = (tmpl.flags & GraphicsPipelineTemplate::FLAG_DEPTH_WRITE_ENABLED)
				? VK_TRUE : VK_FALSE,
		.depthCompareOp = tmpl.depthCompareOp,
		.maxDepthBounds = 1.f,
	};
	VkDynamicState dynamicStates[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
	VkPipelineDynamicStateCreateInfo dynamicState {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
		.dynamicStateCount = 2,
		.pDynamicStates = dynamicStates
	};
	VkGraphicsPipelineCreateInfo createInfo {
		.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
		.stageCount = tmpl.program->get_num_modules(),
		.pStages = stages.data(),
		.pVertexInputState = &vertexInput,
		.pInputAssemblyState = &inputAssembly,
		.pViewportState = &viewportState,
		.pRasterizationState = &rasterizer,
		.pMultisampleState = &multisampling,
		.pDepthStencilState = &depthStencil,
		.pColorBlendState = &colorBlendState,
		.pDynamicState = &dynamicState,
		.layout = tmpl.program->get_pipeline_layout(),
		.renderPass = renderPass,
		.subpass = subpassIndex,
	};
	if (vkCreateGraphicsPipelines(g_renderContext->get_device(), VK_NULL_HANDLE, 1, &createInfo,
			nullptr, &pipeline) == VK_SUCCESS) {
		return pipeline;
	}
	return VK_NULL_HANDLE;
}
|
whupdup/frame
|
real/graphics/graphics_pipeline.cpp
|
C++
|
gpl-3.0
| 6,034
|
#pragma once
#include <core/pair.hpp>
#include <graphics/shader_program.hpp>
#include <graphics/specialization_info.hpp>
#include <memory>
#include <unordered_map>
#include <vector>
namespace ZN::GFX {
class RenderPass;
// Immutable description of a graphics pipeline; actual VkPipelines are
// instantiated lazily per (render pass, subpass) by GraphicsPipeline.
struct GraphicsPipelineTemplate {
	enum Flags : uint32_t {
		FLAG_BLEND_ENABLED = 0b0001,
		FLAG_DEPTH_TEST_ENABLED = 0b0010,
		FLAG_DEPTH_WRITE_ENABLED = 0b0100,
	};
	std::vector<SpecializationInfo> specializationInfos;
	// (stage, index into specializationInfos) pairs.
	std::vector<Pair<VkShaderStageFlagBits, uint32_t>> specializationIndices;
	Memory::IntrusivePtr<ShaderProgram> program;
	// Owned copies of the vertex input layout (see builder setters).
	std::unique_ptr<VkVertexInputAttributeDescription[]> attributeDescriptions = nullptr;
	std::unique_ptr<VkVertexInputBindingDescription[]> bindingDescriptions = nullptr;
	uint32_t attributeDescriptionCount = 0;
	uint32_t bindingDescriptionCount = 0;
	VkCullModeFlags cullMode = VK_CULL_MODE_BACK_BIT;
	VkBlendOp colorBlendOp = VK_BLEND_OP_ADD;
	VkBlendFactor srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
	VkBlendFactor dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
	VkBlendOp alphaBlendOp = VK_BLEND_OP_ADD;
	VkBlendFactor srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
	VkBlendFactor dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
	VkCompareOp depthCompareOp = VK_COMPARE_OP_ALWAYS;
	// Bitwise OR of Flags.
	uint32_t flags = 0;
};
// Caches one VkPipeline per (render pass, subpass), all built from a single
// GraphicsPipelineTemplate; pipelines are created on first request.
class GraphicsPipeline final : public Memory::ThreadSafeIntrusivePtrEnabled<GraphicsPipeline> {
	public:
		static Memory::IntrusivePtr<GraphicsPipeline> create(GraphicsPipelineTemplate&&);
		~GraphicsPipeline();
		NULL_COPY_AND_ASSIGN(GraphicsPipeline);
		VkPipeline get_pipeline(const RenderPass&, uint32_t subpassIndex);
		VkPipelineLayout get_layout() const;
	private:
		// Cache key: render pass unique ID + subpass index.
		struct PipelineKey {
			uint64_t renderPass;
			uint32_t subpassIndex;
			bool operator==(const PipelineKey&) const;
		};
		struct PipelineKeyHash {
			size_t operator()(const PipelineKey&) const;
		};
		std::unordered_map<PipelineKey, VkPipeline, PipelineKeyHash> m_pipelines;
		GraphicsPipelineTemplate m_template;
		explicit GraphicsPipeline(GraphicsPipelineTemplate&&);
};
// Fluent builder assembling a GraphicsPipelineTemplate; all setters return
// *this, and build() creates the GraphicsPipeline.
class GraphicsPipelineBuilder final {
	public:
		explicit GraphicsPipelineBuilder() = default;
		NULL_COPY_AND_ASSIGN(GraphicsPipelineBuilder);
		GraphicsPipelineBuilder& add_program(ShaderProgram&);
		GraphicsPipelineBuilder& add_specialization_info(SpecializationInfo&&,
				VkShaderStageFlagBits);
		GraphicsPipelineBuilder& add_specialization_info(const SpecializationInfo&,
				VkShaderStageFlagBits);
		// Vertex layout descriptions are copied into the template.
		GraphicsPipelineBuilder& set_vertex_attribute_descriptions(
				const VkVertexInputAttributeDescription* descriptions, uint32_t count);
		GraphicsPipelineBuilder& set_vertex_binding_descriptions(
				const VkVertexInputBindingDescription* descriptions,
				uint32_t count);
		// Convenience overloads for contiguous containers.
		template <typename Container>
		GraphicsPipelineBuilder& set_vertex_attribute_descriptions(const Container& cont) {
			return set_vertex_attribute_descriptions(cont.data(),
					static_cast<uint32_t>(cont.size()));
		}
		template <typename Container>
		GraphicsPipelineBuilder& set_vertex_binding_descriptions(const Container& cont) {
			return set_vertex_binding_descriptions(cont.data(),
					static_cast<uint32_t>(cont.size()));
		}
		GraphicsPipelineBuilder& set_cull_mode(VkCullModeFlags);
		GraphicsPipelineBuilder& set_blend_enabled(bool);
		GraphicsPipelineBuilder& set_color_blend(VkBlendOp, VkBlendFactor src, VkBlendFactor dst);
		GraphicsPipelineBuilder& set_alpha_blend(VkBlendOp, VkBlendFactor src, VkBlendFactor dst);
		GraphicsPipelineBuilder& set_depth_test_enabled(bool);
		GraphicsPipelineBuilder& set_depth_write_enabled(bool);
		GraphicsPipelineBuilder& set_depth_compare_op(VkCompareOp);
		//GraphicsPipelineBuilder& add_dynamic_state(VkDynamicState);
		[[nodiscard]] Memory::IntrusivePtr<GraphicsPipeline> build();
	private:
		GraphicsPipelineTemplate m_template = {};
};
}
|
whupdup/frame
|
real/graphics/graphics_pipeline.hpp
|
C++
|
gpl-3.0
| 3,824
|
#include "graphics_pipeline.hpp"
#include <graphics/shader_program.hpp>
#include <cstring>
using namespace ZN;
using namespace ZN::GFX;
// Stores a strong (intrusive) reference to the shader program in the template.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::add_program(ShaderProgram& program) {
m_template.program = program.reference_from_this();
return *this;
}
// Records specialization constants for `stage`. The (stage -> info) mapping is
// kept as two parallel arrays: `specializationIndices` holds the stage paired
// with the index of its entry in `specializationInfos`.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::add_specialization_info(
		SpecializationInfo&& info, VkShaderStageFlagBits stage) {
	const auto nextIndex = static_cast<uint32_t>(m_template.specializationIndices.size());
	m_template.specializationIndices.push_back({stage, nextIndex});
	m_template.specializationInfos.emplace_back(std::move(info));
	return *this;
}
// Copying overload; bookkeeping is identical to the rvalue overload above.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::add_specialization_info(
		const SpecializationInfo& info, VkShaderStageFlagBits stage) {
	const auto nextIndex = static_cast<uint32_t>(m_template.specializationIndices.size());
	m_template.specializationIndices.push_back({stage, nextIndex});
	m_template.specializationInfos.emplace_back(info);
	return *this;
}
// Deep-copies the caller's attribute descriptions into template-owned storage;
// the caller's array may go out of scope before build() runs.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::set_vertex_attribute_descriptions(
		const VkVertexInputAttributeDescription* descriptions, uint32_t count) {
	if (count == 0) {
		// Guard: calling memcpy with a null source pointer is undefined even
		// when the size is 0. An empty set simply clears the stored array.
		m_template.attributeDescriptions.reset();
		m_template.attributeDescriptionCount = 0;
		return *this;
	}
	auto ownedDescs = std::make_unique<VkVertexInputAttributeDescription[]>(count);
	std::memcpy(ownedDescs.get(), descriptions, count * sizeof(VkVertexInputAttributeDescription));
	m_template.attributeDescriptions = std::move(ownedDescs);
	m_template.attributeDescriptionCount = count;
	return *this;
}
// Deep-copies the caller's binding descriptions into template-owned storage;
// the caller's array may go out of scope before build() runs.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::set_vertex_binding_descriptions(
		const VkVertexInputBindingDescription* descriptions, uint32_t count) {
	if (count == 0) {
		// Guard: calling memcpy with a null source pointer is undefined even
		// when the size is 0. An empty set simply clears the stored array.
		m_template.bindingDescriptions.reset();
		m_template.bindingDescriptionCount = 0;
		return *this;
	}
	auto ownedDescs = std::make_unique<VkVertexInputBindingDescription[]>(count);
	std::memcpy(ownedDescs.get(), descriptions, count * sizeof(VkVertexInputBindingDescription));
	m_template.bindingDescriptions = std::move(ownedDescs);
	m_template.bindingDescriptionCount = count;
	return *this;
}
// Selects which triangle faces get culled during rasterization.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::set_cull_mode(VkCullModeFlags mode) {
	m_template.cullMode = mode;
	return *this;
}
// Toggles the blend-enabled bit in the template's flag word.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::set_blend_enabled(bool enabled) {
	if (!enabled) {
		m_template.flags &= ~GraphicsPipelineTemplate::FLAG_BLEND_ENABLED;
	}
	else {
		m_template.flags |= GraphicsPipelineTemplate::FLAG_BLEND_ENABLED;
	}
	return *this;
}
// Sets the blend equation applied to the RGB channels.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::set_color_blend(VkBlendOp blendOp,
		VkBlendFactor srcFactor, VkBlendFactor dstFactor) {
	m_template.colorBlendOp = blendOp;
	m_template.srcColorBlendFactor = srcFactor;
	m_template.dstColorBlendFactor = dstFactor;
	return *this;
}
// Sets the blend equation applied to the alpha channel.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::set_alpha_blend(VkBlendOp blendOp,
		VkBlendFactor srcFactor, VkBlendFactor dstFactor) {
	m_template.alphaBlendOp = blendOp;
	m_template.srcAlphaBlendFactor = srcFactor;
	m_template.dstAlphaBlendFactor = dstFactor;
	return *this;
}
// Toggles the depth-test bit in the template's flag word.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::set_depth_test_enabled(bool enabled) {
	if (!enabled) {
		m_template.flags &= ~GraphicsPipelineTemplate::FLAG_DEPTH_TEST_ENABLED;
	}
	else {
		m_template.flags |= GraphicsPipelineTemplate::FLAG_DEPTH_TEST_ENABLED;
	}
	return *this;
}
// Toggles the depth-write bit in the template's flag word.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::set_depth_write_enabled(bool enabled) {
	if (!enabled) {
		m_template.flags &= ~GraphicsPipelineTemplate::FLAG_DEPTH_WRITE_ENABLED;
	}
	else {
		m_template.flags |= GraphicsPipelineTemplate::FLAG_DEPTH_WRITE_ENABLED;
	}
	return *this;
}
// Sets the comparison used by the depth test.
GraphicsPipelineBuilder& GraphicsPipelineBuilder::set_depth_compare_op(VkCompareOp op) {
	m_template.depthCompareOp = op;
	return *this;
}
// Consumes the accumulated template and creates the pipeline. The builder's
// template is moved from, so the builder should not be reused afterwards.
Memory::IntrusivePtr<GraphicsPipeline> GraphicsPipelineBuilder::build() {
return GraphicsPipeline::create(std::move(m_template));
}
|
whupdup/frame
|
real/graphics/graphics_pipeline_builder.cpp
|
C++
|
gpl-3.0
| 3,656
|
#include "image.hpp"
#include <graphics/render_context.hpp>
#include <Tracy.hpp>
using namespace ZN;
using namespace ZN::GFX;
static int64_t g_counter = 0;
// Allocates a new VkImage through VMA and wraps it in a ref-counted Image.
// Returns a null pointer when the allocation fails.
Memory::IntrusivePtr<Image> Image::create(const VkImageCreateInfo& createInfo,
		VmaMemoryUsage memoryUsage, VkMemoryPropertyFlags requiredFlags) {
	VmaAllocationCreateInfo allocCreateInfo{};
	allocCreateInfo.usage = memoryUsage;
	allocCreateInfo.requiredFlags = requiredFlags;
	VkImage handle;
	VmaAllocation allocation;
	const auto result = vmaCreateImage(g_renderContext->get_allocator(), &createInfo,
			&allocCreateInfo, &handle, &allocation, nullptr);
	if (result != VK_SUCCESS) {
		return {};
	}
	return Memory::IntrusivePtr<Image>(new Image(handle, allocation, createInfo.extent,
			createInfo.format, createInfo.samples, createInfo.mipLevels,
			createInfo.arrayLayers, false));
}
// Wraps an externally-owned image (a swapchain image). Badge restricts this
// overload to RenderContext. The wrapper never destroys the VkImage.
Memory::IntrusivePtr<Image> Image::create(VkImage image, VkFormat format,
VkExtent3D extent, Badge<RenderContext>) {
return Memory::IntrusivePtr<Image>(new Image(image, VK_NULL_HANDLE, std::move(extent),
format, VK_SAMPLE_COUNT_1_BIT, 1, 1, true));
}
// Private constructor: records handle/allocation/metadata and bumps the
// live-image counter plotted by Tracy for leak diagnostics.
Image::Image(VkImage image, VmaAllocation allocation, VkExtent3D extent, VkFormat format,
VkSampleCountFlagBits sampleCount, uint32_t levelCount, uint32_t layerCount,
bool swapchainImage)
: m_image(image)
, m_allocation(allocation)
, m_extent(extent)
, m_format(format)
, m_sampleCount(sampleCount)
, m_levelCount(levelCount)
, m_layerCount(layerCount)
, m_swapchainImage(swapchainImage) {
++g_counter;
TracyPlot("Images", g_counter);
}
// Decrements the live-image counter and schedules destruction of the
// underlying VkImage. Delegates to delete_late() so the deferred-destroy
// logic (including the swapchain-image exemption) lives in one place; the
// destructor previously duplicated that lambda verbatim.
Image::~Image() {
	--g_counter;
	TracyPlot("Images", g_counter);
	delete_late();
}
// Queues the VkImage + allocation for deferred destruction on the render
// context, then nulls the handle so a second call (or the destructor) is a
// no-op. Swapchain images are owned by the swapchain and are never destroyed.
void Image::delete_late() {
if (m_image != VK_NULL_HANDLE && !m_swapchainImage) {
g_renderContext->queue_delete_late([image=m_image, allocation=m_allocation] {
vmaDestroyImage(g_renderContext->get_allocator(), image, allocation);
});
m_image = VK_NULL_HANDLE;
}
}
// Forwards a subresource state change to the resource tracker, which appends
// whatever barriers are required to `barrierInfo`.
void Image::update_subresources(BarrierInfoCollection& barrierInfo,
const ImageResourceTracker::ResourceInfo& range,
ImageResourceTracker::BarrierMode barrierMode, bool ignorePreviousState) {
m_resourceTracker.update_range(m_image, barrierInfo, range, barrierMode, ignorePreviousState);
}
// Implicit conversion so an Image can be passed directly to Vulkan calls.
Image::operator VkImage() const {
return m_image;
}
VkImage Image::get_image() const {
return m_image;
}
// VK_NULL_HANDLE for swapchain images (no VMA allocation backs them).
VmaAllocation Image::get_allocation() const {
return m_allocation;
}
VkExtent3D Image::get_extent() const {
return m_extent;
}
VkFormat Image::get_format() const {
return m_format;
}
VkSampleCountFlagBits Image::get_sample_count() const {
return m_sampleCount;
}
bool Image::is_swapchain_image() const {
return m_swapchainImage;
}
|
whupdup/frame
|
real/graphics/image.cpp
|
C++
|
gpl-3.0
| 2,830
|
#pragma once
#include <core/badge.hpp>
#include <core/intrusive_ptr.hpp>
#include <graphics/image_resource_tracker.hpp>
#include <graphics/unique_graphics_object.hpp>
#include <volk.h>
#include <vk_mem_alloc.h>
namespace ZN::GFX {
class CommandBuffer;
class RenderContext;
// Ref-counted wrapper over a VkImage plus its VMA allocation. Tracks
// per-subresource state through an ImageResourceTracker so barriers can be
// generated incrementally. Swapchain images are wrapped but not owned.
class Image final : public Memory::ThreadSafeIntrusivePtrEnabled<Image>,
public UniqueGraphicsObject {
public:
// Allocates a new image via VMA; returns null on failure.
static Memory::IntrusivePtr<Image> create(const VkImageCreateInfo&, VmaMemoryUsage,
VkMemoryPropertyFlags requiredFlags = 0);
// Wraps an externally-owned (swapchain) image; only RenderContext may call.
static Memory::IntrusivePtr<Image> create(VkImage, VkFormat, VkExtent3D,
Badge<RenderContext>);
~Image();
NULL_COPY_AND_ASSIGN(Image);
// Defers destruction of the VkImage to the render context's late-delete
// queue and neutralizes this wrapper.
void delete_late();
// Transitions the given subresource range, emitting barriers as needed.
void update_subresources(BarrierInfoCollection& barrierInfo,
const ImageResourceTracker::ResourceInfo& range,
ImageResourceTracker::BarrierMode barrierMode, bool ignorePreviousState);
operator VkImage() const;
VkImage get_image() const;
VmaAllocation get_allocation() const;
VkExtent3D get_extent() const;
VkFormat get_format() const;
VkSampleCountFlagBits get_sample_count() const;
bool is_swapchain_image() const;
private:
VkImage m_image;
ImageResourceTracker m_resourceTracker;
VmaAllocation m_allocation;
VkExtent3D m_extent;
VkFormat m_format;
VkSampleCountFlagBits m_sampleCount;
uint32_t m_levelCount;
uint32_t m_layerCount;
bool m_swapchainImage;
// Fixed parameter-name typo: layerCOunt -> layerCount (declaration only;
// no caller or definition is affected).
explicit Image(VkImage, VmaAllocation, VkExtent3D, VkFormat, VkSampleCountFlagBits,
uint32_t levelCount, uint32_t layerCount, bool);
};
}
|
whupdup/frame
|
real/graphics/image.hpp
|
C++
|
gpl-3.0
| 1,544
|
#include "image_resource_tracker.hpp"
#include <graphics/barrier_info_collection.hpp>
using namespace ZN;
using namespace ZN::GFX;
static bool ranges_intersect(const VkImageSubresourceRange& a, const VkImageSubresourceRange& b);
// A fully covers B
static bool range_fully_covers(const VkImageSubresourceRange& a, const VkImageSubresourceRange& b);
// ResourceInfo
// True when every tracked state field (stage/layout/access/queue family)
// matches `other`; the subresource range itself is deliberately not compared.
bool ImageResourceTracker::ResourceInfo::states_equal(const ResourceInfo& other) const {
	if (stageFlags != other.stageFlags) {
		return false;
	}
	if (layout != other.layout) {
		return false;
	}
	if (accessMask != other.accessMask) {
		return false;
	}
	return queueFamilyIndex == other.queueFamilyIndex;
}
// True when the two (mip x layer) rectangles overlap.
bool ImageResourceTracker::ResourceInfo::intersects(const ResourceInfo& other) const {
	return ranges_intersect(range, other.range);
}
// True when this range is a superset of `other`'s range.
bool ImageResourceTracker::ResourceInfo::fully_covers(const ResourceInfo& other) const {
	return range_fully_covers(range, other.range);
}
// ImageResourceTracker
// Public entry point: merges `rangeIn` into the tracked state, appending any
// required barriers to `barrierInfo`, then re-coalesces adjacent ranges with
// identical state so the range list stays minimal.
void ImageResourceTracker::update_range(VkImage image, BarrierInfoCollection& barrierInfo,
const ResourceInfo& rangeIn, BarrierMode barrierMode, bool ignorePreviousState) {
insert_range_internal(image, rangeIn, barrierInfo, barrierMode, ignorePreviousState);
union_ranges();
}
// Number of disjoint state ranges currently tracked (diagnostics/tests).
size_t ImageResourceTracker::get_range_count() const {
return m_ranges.size();
}
// Debug helper: reports whether any two tracked ranges overlap. The tracker's
// invariant is that ranges stay disjoint, so this should always return false.
bool ImageResourceTracker::has_overlapping_ranges() const {
	const size_t count = m_ranges.size();
	for (size_t i = 0; i + 1 < count; ++i) {
		for (size_t j = i + 1; j < count; ++j) {
			if (m_ranges[i].intersects(m_ranges[j])) {
				return true;
			}
		}
	}
	return false;
}
bool ImageResourceTracker::has_range(const ResourceInfo& rangeIn) const {
for (auto& range : m_ranges) {
if (range.states_equal(rangeIn) && range.fully_covers(rangeIn)
&& rangeIn.fully_covers(range)) {
return true;
}
}
return false;
}
// Core insertion routine. Walks the tracked ranges backwards, resolving
// `rangeIn` against each existing range: exact/covered matches may only need
// a barrier (case 1), superseded ranges are removed (case 2), and partial
// overlaps are split into their set difference (case 3), recursing through
// insert_range_like/generate_range_difference. Iteration is backwards so that
// swap-with-last removal does not skip elements. `generateLastBarrier`
// controls whether the final UNDEFINED->target barrier is emitted for the
// portion of `rangeIn` that overlapped nothing.
void ImageResourceTracker::insert_range_internal(VkImage image, const ResourceInfo& rangeIn,
BarrierInfoCollection& barrierInfo, BarrierMode barrierMode, bool ignorePreviousState,
bool generateLastBarrier) {
bool insertRangeIn = true;
if (!m_ranges.empty()) {
for (size_t i = m_ranges.size() - 1;; --i) {
auto& range = m_ranges[i];
auto statesEqual = range.states_equal(rangeIn);
// A queue-family ownership transfer (QFOT) is required whenever the
// owning queue family changes and the previous state matters.
bool needsQFOT = !ignorePreviousState
&& range.queueFamilyIndex != rangeIn.queueFamilyIndex;
if (range.fully_covers(rangeIn) && (statesEqual || needsQFOT)) {
// CASE 1: `rangeIn` is entirely covered by `range`, and states are equal, meaning
// no alternations need to be made to the range list
if (needsQFOT) {
// QFOT must transition the WHOLE existing range, not just
// the covered sub-rectangle.
add_barrier(image, barrierInfo, range, rangeIn, range.range,
ignorePreviousState);
range.queueFamilyIndex = rangeIn.queueFamilyIndex;
}
else if (barrierMode == BarrierMode::ALWAYS) {
add_barrier(image, barrierInfo, range, rangeIn, rangeIn.range,
ignorePreviousState);
}
return;
}
else if (rangeIn.fully_covers(range) && (ignorePreviousState || statesEqual)) {
// CASE 2: input range fully covers existing range, therefore remove it.
// This case is only valid if `rangeIn` can supersede the previous value
if (barrierMode == BarrierMode::ALWAYS) {
// NOTE(review): std::move on a const member is a copy here;
// harmless, but the move is a no-op.
add_barrier(image, barrierInfo, range, rangeIn, std::move(range.range),
ignorePreviousState);
generateLastBarrier = false;
}
// Swap-with-last removal; safe because we iterate backwards.
m_ranges[i] = std::move(m_ranges.back());
m_ranges.pop_back();
}
else if (rangeIn.intersects(range)) {
// CASE 3: input range partially covers existing range, generate difference
// between the 2 ranges. If there is a barrier of interest, it will be on
// the intersection of both
// CASE 3a: Needs QFOT, entire source range must be transitioned
if (needsQFOT && !rangeIn.fully_covers(range)) {
add_barrier(image, barrierInfo, range, rangeIn, range.range, false);
generate_range_difference(rangeIn, range, image, barrierInfo, barrierMode,
ignorePreviousState, generateLastBarrier);
range.queueFamilyIndex = rangeIn.queueFamilyIndex;
generateLastBarrier = false;
insertRangeIn = false;
}
else {
// CASE 3b: split both ranges around their intersection. The
// old range is pulled out of the list first because the
// recursive calls below may reallocate m_ranges.
auto rangeCopy = std::move(range);
m_ranges[i] = std::move(m_ranges.back());
m_ranges.pop_back();
bool needsUniqueBarriers = barrierMode == BarrierMode::ALWAYS || !statesEqual;
if (needsUniqueBarriers && barrierMode != BarrierMode::NEVER) {
auto oldState = make_range_intersection(rangeCopy, rangeIn);
add_barrier(image, barrierInfo, oldState, rangeIn,
std::move(oldState.range), ignorePreviousState);
}
//puts("Emitting range difference of src range");
generate_range_difference(rangeCopy, rangeIn, image, barrierInfo, barrierMode,
ignorePreviousState, false);
if (!ignorePreviousState) {
if (needsUniqueBarriers) {
//puts("Emitting range difference of rangeIn");
generate_range_difference(rangeIn, rangeCopy, image, barrierInfo,
barrierMode, ignorePreviousState, generateLastBarrier);
m_ranges.emplace_back(make_range_intersection(rangeIn, rangeCopy));
return;
}
generateLastBarrier = false;
}
}
}
if (i == 0) {
break;
}
}
}
if (insertRangeIn) {
m_ranges.push_back(rangeIn);
}
// Range inserted here has no intersections, therefore is always src = UNDEFINED
if (generateLastBarrier && barrierMode != BarrierMode::NEVER) {
//puts("GENERATING FINAL BARRIER");
VkImageMemoryBarrier barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = 0,
.dstAccessMask = rangeIn.accessMask,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = rangeIn.layout,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = image,
.subresourceRange = std::move(rangeIn.range)
};
barrierInfo.add_image_memory_barrier(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
rangeIn.stageFlags, 0, std::move(barrier));
}
}
// Computes A \ B in the 2D (mip level = X, array layer = Y) space and
// re-inserts each remaining rectangle with A's state. The difference is split
// into up to four pieces: a strip below B, a strip above B, then left and
// right strips clipped to B's vertical extent.
void ImageResourceTracker::generate_range_difference(const ResourceInfo& a, const ResourceInfo& b,
VkImage image, BarrierInfoCollection& barrierInfo, BarrierMode barrierMode,
bool ignorePreviousState, bool generateLastBarrier) {
auto aMinX = a.range.baseMipLevel;
auto aMaxX = a.range.baseMipLevel + a.range.levelCount;
auto aMinY = a.range.baseArrayLayer;
auto aMaxY = a.range.baseArrayLayer + a.range.layerCount;
auto bMinX = b.range.baseMipLevel;
auto bMaxX = b.range.baseMipLevel + b.range.levelCount;
auto bMinY = b.range.baseArrayLayer;
auto bMaxY = b.range.baseArrayLayer + b.range.layerCount;
// Vertical extent left for the side strips after the top/bottom strips
// have been emitted (prevents double-covering corners).
auto sideMinY = aMinY;
auto sideMaxY = aMaxY;
if (aMinY < bMinY) {
insert_range_like(a, image, barrierInfo, barrierMode, ignorePreviousState,
generateLastBarrier, aMinX, aMinY, aMaxX, bMinY);
sideMinY = bMinY;
}
if (bMaxY < aMaxY) {
insert_range_like(a, image, barrierInfo, barrierMode, ignorePreviousState,
generateLastBarrier, aMinX, bMaxY, aMaxX, aMaxY);
sideMaxY = bMaxY;
}
if (aMinX < bMinX) {
insert_range_like(a, image, barrierInfo, barrierMode, ignorePreviousState,
generateLastBarrier, aMinX, sideMinY, bMinX, sideMaxY);
}
if (bMaxX < aMaxX) {
insert_range_like(a, image, barrierInfo, barrierMode, ignorePreviousState,
generateLastBarrier, bMaxX, sideMinY, aMaxX, sideMaxY);
}
}
// Convenience wrapper: builds a ResourceInfo with `info`'s state but the given
// rectangle, and inserts it through the normal insertion path.
void ImageResourceTracker::insert_range_like(const ResourceInfo& info, VkImage image,
BarrierInfoCollection& barrierInfo, BarrierMode barrierMode, bool ignorePreviousState,
bool generateLastBarrier, uint32_t minX, uint32_t minY, uint32_t maxX, uint32_t maxY) {
insert_range_internal(image, create_range_like(info, minX, minY, maxX, maxY), barrierInfo,
barrierMode, ignorePreviousState, generateLastBarrier);
}
// Coalesces adjacent ranges with identical state. Two rectangles merge when
// they share one full edge: same mip span with touching layer spans (cases
// 1/2) or same layer span with touching mip spans (cases 3/4). After each
// merge the loops restart, since a merge can enable further merges.
void ImageResourceTracker::union_ranges() {
bool foundUnion = false;
do {
foundUnion = false;
for (size_t i = 0; i < m_ranges.size(); ++i) {
auto& a = m_ranges[i];
for (size_t j = i + 1; j < m_ranges.size(); ++j) {
auto& b = m_ranges[j];
if (!a.states_equal(b)) {
continue;
}
auto aMinX = a.range.baseMipLevel;
auto aMaxX = a.range.baseMipLevel + a.range.levelCount;
auto aMinY = a.range.baseArrayLayer;
auto aMaxY = a.range.baseArrayLayer + a.range.layerCount;
auto bMinX = b.range.baseMipLevel;
auto bMaxX = b.range.baseMipLevel + b.range.levelCount;
auto bMinY = b.range.baseArrayLayer;
auto bMaxY = b.range.baseArrayLayer + b.range.layerCount;
if (aMinX == bMinX && aMaxX == bMaxX) {
// Same mip span: try to merge vertically (layers).
if (aMaxY == bMinY) {
foundUnion = true;
//puts("Union case 1");
a.range.layerCount = bMaxY - aMinY;
}
else if (aMinY == bMaxY) {
foundUnion = true;
//puts("Union case 2");
a.range.baseArrayLayer = bMinY;
a.range.layerCount = aMaxY - bMinY;
}
}
else if (aMinY == bMinY && aMaxY == bMaxY) {
// Same layer span: try to merge horizontally (mips).
if (aMaxX == bMinX) {
foundUnion = true;
//printf("Union case 3 {%u, %u, %u, %u} U {%u, %u, %u, %u}\n",
// aMinX, aMinY, aMaxX, aMaxY, bMinX, bMinY, bMaxX, bMaxY);
a.range.levelCount = bMaxX - aMinX;
}
else if (aMinX == bMaxX) {
foundUnion = true;
//puts("Union case 4");
a.range.baseMipLevel = bMinX;
a.range.levelCount = aMaxX - bMinX;
}
}
if (foundUnion) {
// `b` was absorbed into `a`; remove it and rescan.
m_ranges[j] = std::move(m_ranges.back());
m_ranges.pop_back();
break;
}
}
if (foundUnion) {
break;
}
}
}
while (foundUnion);
}
// Returns a copy of `info` whose subresource rectangle is replaced by
// [minX, maxX) mip levels x [minY, maxY) array layers. The aspect mask and
// all state fields are carried over unchanged.
ImageResourceTracker::ResourceInfo ImageResourceTracker::create_range_like(
		const ResourceInfo& info, uint32_t minX, uint32_t minY, uint32_t maxX, uint32_t maxY) {
	ResourceInfo result = info;
	result.range.baseMipLevel = minX;
	result.range.levelCount = maxX - minX;
	result.range.baseArrayLayer = minY;
	result.range.layerCount = maxY - minY;
	return result;
}
// Appends a VkImageMemoryBarrier transitioning `range` from `from`'s state to
// `to`'s. When `ignorePreviousState` is set the source is treated as
// UNDEFINED (contents discarded). Equal queue families collapse to
// VK_QUEUE_FAMILY_IGNORED so no ownership transfer is requested.
void ImageResourceTracker::add_barrier(VkImage image, BarrierInfoCollection& barrierInfo,
const ResourceInfo& from, const ResourceInfo& to, VkImageSubresourceRange range,
bool ignorePreviousState) {
VkImageMemoryBarrier barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = ignorePreviousState ? 0 : from.accessMask,
.dstAccessMask = to.accessMask,
.oldLayout = ignorePreviousState ? VK_IMAGE_LAYOUT_UNDEFINED : from.layout,
.newLayout = to.layout,
.srcQueueFamilyIndex = from.queueFamilyIndex,
.dstQueueFamilyIndex = to.queueFamilyIndex,
.image = image,
.subresourceRange = std::move(range)
};
if (barrier.srcQueueFamilyIndex == barrier.dstQueueFamilyIndex) {
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
}
barrierInfo.add_image_memory_barrier(from.stageFlags, to.stageFlags,
0 /* FIXME: figure out how to pass dependency flags */, std::move(barrier));
}
// Returns a ResourceInfo carrying `a`'s state whose rectangle is the
// geometric intersection of `a` and `b` in (mip, layer) space. Callers only
// invoke this for ranges that already intersect, so max - min is positive.
ImageResourceTracker::ResourceInfo ImageResourceTracker::make_range_intersection(
		const ResourceInfo& a, const ResourceInfo& b) {
	const auto& ra = a.range;
	const auto& rb = b.range;
	const auto minMip = ra.baseMipLevel > rb.baseMipLevel
			? ra.baseMipLevel : rb.baseMipLevel;
	const auto aEndMip = ra.baseMipLevel + ra.levelCount;
	const auto bEndMip = rb.baseMipLevel + rb.levelCount;
	const auto maxMip = aEndMip < bEndMip ? aEndMip : bEndMip;
	const auto minLayer = ra.baseArrayLayer > rb.baseArrayLayer
			? ra.baseArrayLayer : rb.baseArrayLayer;
	const auto aEndLayer = ra.baseArrayLayer + ra.layerCount;
	const auto bEndLayer = rb.baseArrayLayer + rb.layerCount;
	const auto maxLayer = aEndLayer < bEndLayer ? aEndLayer : bEndLayer;
	ResourceInfo result = a;
	result.range.baseMipLevel = minMip;
	result.range.levelCount = maxMip - minMip;
	result.range.baseArrayLayer = minLayer;
	result.range.layerCount = maxLayer - minLayer;
	return result;
}
// True when the (mip x layer) rectangles of `a` and `b` overlap.
static bool ranges_intersect(const VkImageSubresourceRange& a, const VkImageSubresourceRange& b) {
	const bool mipsOverlap = a.baseMipLevel < b.baseMipLevel + b.levelCount
			&& b.baseMipLevel < a.baseMipLevel + a.levelCount;
	const bool layersOverlap = a.baseArrayLayer < b.baseArrayLayer + b.layerCount
			&& b.baseArrayLayer < a.baseArrayLayer + a.layerCount;
	return mipsOverlap && layersOverlap;
}
// True when `a` covers every mip level and array layer of `b`.
static bool range_fully_covers(const VkImageSubresourceRange& a,
		const VkImageSubresourceRange& b) {
	const bool mipsCovered = a.baseMipLevel <= b.baseMipLevel
			&& b.baseMipLevel + b.levelCount <= a.baseMipLevel + a.levelCount;
	const bool layersCovered = a.baseArrayLayer <= b.baseArrayLayer
			&& b.baseArrayLayer + b.layerCount <= a.baseArrayLayer + a.layerCount;
	return mipsCovered && layersCovered;
}
|
whupdup/frame
|
real/graphics/image_resource_tracker.cpp
|
C++
|
gpl-3.0
| 12,566
|
#pragma once
#include <volk.h>
#include <vector>
namespace ZN::GFX {
class BarrierInfoCollection;
// Tracks per-subresource (mip level x array layer) state of a VkImage as a
// set of disjoint rectangles, each tagged with stage/layout/access/queue
// state, and emits the image memory barriers needed to move to a new state.
class ImageResourceTracker final {
public:
// State + rectangle pair. `range` is the rectangle in (mip, layer) space;
// the remaining fields describe how that rectangle was last used.
struct ResourceInfo {
VkPipelineStageFlags stageFlags;
VkImageLayout layout;
VkAccessFlags accessMask;
uint32_t queueFamilyIndex;
VkImageSubresourceRange range;
// Compares state fields only (not the rectangle).
bool states_equal(const ResourceInfo& other) const;
bool intersects(const ResourceInfo& other) const;
bool fully_covers(const ResourceInfo& other) const;
};
// TRANSITIONS_ONLY: barriers only when state actually changes.
// ALWAYS: emit a barrier even for identical states.
// NEVER: update tracking without emitting barriers.
enum class BarrierMode {
TRANSITIONS_ONLY,
ALWAYS,
NEVER
};
// Merges `rangeIn` into the tracked state, appending barriers to
// `barrierInfo`; `ignorePreviousState` treats the old contents as UNDEFINED.
void update_range(VkImage image, BarrierInfoCollection& barrierInfo,
const ResourceInfo& rangeIn, BarrierMode barrierMode, bool ignorePreviousState);
size_t get_range_count() const;
// Invariant check: tracked ranges must stay pairwise disjoint.
bool has_overlapping_ranges() const;
bool has_range(const ResourceInfo& rangeIn) const;
private:
std::vector<ResourceInfo> m_ranges;
void insert_range_internal(VkImage image, const ResourceInfo& rangeIn,
BarrierInfoCollection& barrierInfo, BarrierMode barrierMode,
bool ignorePreviousState, bool generateLastBarrier = true);
// Emits ranges that are pieces of A if B was subtracted from it, meaning the resulting
// ranges include none of B's coverage
void generate_range_difference(const ResourceInfo& a, const ResourceInfo& b, VkImage image,
BarrierInfoCollection& barrierInfo, BarrierMode barrierMode,
bool ignorePreviousState, bool generateLastBarrier);
void insert_range_like(const ResourceInfo& info, VkImage image,
BarrierInfoCollection& barrierInfo, BarrierMode barrierMode,
bool ignorePreviousState, bool generateLastBarrier, uint32_t minX, uint32_t minY,
uint32_t maxX, uint32_t maxY);
// Coalesces edge-adjacent ranges with identical state.
void union_ranges();
static ResourceInfo create_range_like(const ResourceInfo& info, uint32_t minX,
uint32_t minY, uint32_t maxX, uint32_t maxY);
static void add_barrier(VkImage image, BarrierInfoCollection& barrierInfo,
const ResourceInfo& from, const ResourceInfo& to, VkImageSubresourceRange range,
bool ignorePreviousState);
static ResourceInfo make_range_intersection(const ResourceInfo& a, const ResourceInfo& b);
};
}
|
whupdup/frame
|
real/graphics/image_resource_tracker.hpp
|
C++
|
gpl-3.0
| 2,150
|
#include "image_view.hpp"
#include <graphics/image.hpp>
#include <graphics/render_context.hpp>
#include <graphics/vk_initializers.hpp>
#include <Tracy.hpp>
using namespace ZN;
using namespace ZN::GFX;
static int64_t g_counter = 0;
// Convenience overload: builds a VkImageViewCreateInfo via vkinit and
// forwards to the explicit-create-info overload.
Memory::IntrusivePtr<ImageView> ImageView::create(Image& image, VkImageViewType viewType,
VkFormat format, VkImageAspectFlags aspectFlags, uint32_t mipLevels,
uint32_t arrayLayers) {
VkImageViewCreateInfo createInfo = vkinit::image_view_create_info(viewType, format,
image.get_image(), aspectFlags, mipLevels, arrayLayers);
return create(image, createInfo);
}
// Creates a view from an explicit VkImageViewCreateInfo. The info's image
// member must match `image`; returns null on vkCreateImageView failure.
Memory::IntrusivePtr<ImageView> ImageView::create(Image& image,
		const VkImageViewCreateInfo& createInfo) {
	assert(createInfo.image == image.get_image()
			&& "Cannot create image view with different VkImage");
	VkImageView handle;
	const auto result = vkCreateImageView(g_renderContext->get_device(), &createInfo,
			nullptr, &handle);
	if (result != VK_SUCCESS) {
		return {};
	}
	return Memory::IntrusivePtr<ImageView>(new ImageView(handle, image,
			createInfo.subresourceRange));
}
// Wraps an existing view of a swapchain image; only RenderContext may call.
// The subresource range is the single color mip/layer a swapchain view uses.
Memory::IntrusivePtr<ImageView> ImageView::create(Image& image, VkImageView imageView,
Badge<RenderContext>) {
return Memory::IntrusivePtr<ImageView>(new ImageView(imageView, image,
VkImageSubresourceRange{.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1}));
}
// Private constructor: keeps a strong reference to the backing Image so it
// outlives the view, and bumps the Tracy live-view counter.
ImageView::ImageView(VkImageView imageView, Image& image, VkImageSubresourceRange range)
: m_imageView(imageView)
, m_image(image.reference_from_this())
, m_range(std::move(range)) {
++g_counter;
TracyPlot("ImageViews", g_counter);
}
// Decrements the live-view counter and schedules destruction of the
// underlying VkImageView. Delegates to delete_late() so the deferred-destroy
// logic lives in exactly one place; the destructor previously duplicated
// that lambda verbatim.
ImageView::~ImageView() {
	--g_counter;
	TracyPlot("ImageViews", g_counter);
	delete_late();
}
// Queues the VkImageView for deferred destruction on the render context, then
// nulls the handle so a second call (or the destructor) is a no-op.
void ImageView::delete_late() {
if (m_imageView != VK_NULL_HANDLE) {
g_renderContext->queue_delete_late([imageView=this->m_imageView] {
vkDestroyImageView(g_renderContext->get_device(), imageView, nullptr);
});
m_imageView = VK_NULL_HANDLE;
}
}
// Implicit conversion so an ImageView can be passed directly to Vulkan calls.
ImageView::operator VkImageView() const {
return m_imageView;
}
VkImageView ImageView::get_image_view() const {
return m_imageView;
}
// The backing image; guaranteed alive for the view's lifetime via the
// intrusive reference taken in the constructor.
Image& ImageView::get_image() const {
return *m_image;
}
const VkImageSubresourceRange& ImageView::get_subresource_range() const {
return m_range;
}
bool ImageView::is_swapchain_image() const {
return m_image->is_swapchain_image();
}
|
whupdup/frame
|
real/graphics/image_view.cpp
|
C++
|
gpl-3.0
| 2,566
|
#pragma once
#include <core/badge.hpp>
#include <graphics/image.hpp>
namespace ZN::GFX {
class RenderContext;
// Ref-counted wrapper over a VkImageView. Holds a strong reference to its
// backing Image so the image cannot be destroyed while views of it exist.
class ImageView final : public Memory::ThreadSafeIntrusivePtrEnabled<ImageView>,
public UniqueGraphicsObject {
public:
// Builds a view from high-level parameters; returns null on failure.
static Memory::IntrusivePtr<ImageView> create(Image&, VkImageViewType, VkFormat,
VkImageAspectFlags, uint32_t mipLevels = 1, uint32_t arrayLayers = 1);
static Memory::IntrusivePtr<ImageView> create(Image&, const VkImageViewCreateInfo&);
// Wraps an existing swapchain image view; only RenderContext may call.
static Memory::IntrusivePtr<ImageView> create(Image&, VkImageView, Badge<RenderContext>);
~ImageView();
NULL_COPY_AND_ASSIGN(ImageView);
// Defers destruction of the VkImageView to the render context's
// late-delete queue and neutralizes this wrapper.
void delete_late();
operator VkImageView() const;
VkImageView get_image_view() const;
Image& get_image() const;
const VkImageSubresourceRange& get_subresource_range() const;
bool is_swapchain_image() const;
private:
VkImageView m_imageView;
Memory::IntrusivePtr<Image> m_image;
VkImageSubresourceRange m_range;
explicit ImageView(VkImageView, Image&, VkImageSubresourceRange);
};
}
|
whupdup/frame
|
real/graphics/image_view.hpp
|
C++
|
gpl-3.0
| 1,037
|
#include "material.hpp"
#include <graphics/command_buffer.hpp>
#include <graphics/mesh_pool.hpp>
#include <graphics/render_context.hpp>
using namespace ZN;
using namespace ZN::GFX;
// Registers this material with its mesh pool (reserving an indirect-command
// slot) and adds it to the global material list under the list mutex.
Material::Material(MeshPool& pool)
: m_meshPool(pool)
, m_instanceIndexOutputBuffer(GFX::Buffer::create(INSTANCE_BUFFER_INITIAL_CAPACITY
* sizeof(InstanceIndices), VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
VMA_MEMORY_USAGE_GPU_ONLY))
, m_meshMaterialIndex(pool.register_material()) {
ScopedLock lock(*s_materialListMutex);
s_materials.emplace_back(this);
}
// Unregisters this material from the global list (linear search under the
// list mutex). erase() preserves the relative order of the remaining entries.
Material::~Material() {
	ScopedLock lock(*s_materialListMutex);
	for (size_t i = 0; i < s_materials.size(); ++i) {
		if (s_materials[i] != this) {
			continue;
		}
		s_materials.erase(s_materials.begin() + static_cast<std::ptrdiff_t>(i));
		return;
	}
}
// Removes the dense InstanceIndices entry for `handle` by swapping the last
// dense element into its slot and fixing up that element's sparse mapping.
// NOTE(review): the freed sparse slot is set to INVALID_HANDLE_VALUE rather
// than being linked back into m_freeList (see add_instance_index's free-list
// encoding) — handle values appear never to be recycled; confirm whether
// that is intentional.
void Material::remove_instance(InstanceHandle handle) {
ScopedLock lock(*m_mutex);
auto lastIndex = m_instanceIndexBuffer.back();
m_instanceIndexBuffer[m_instanceIndexSparse[handle.value]] = lastIndex;
m_instanceIndexSparse[lastIndex.objectIndex] = m_instanceIndexSparse[handle.value];
m_instanceIndexSparse[handle.value] = INVALID_HANDLE_VALUE;
m_instanceIndexBuffer.pop_back();
}
// Allocates a handle and a dense InstanceIndices slot for an instance of the
// mesh at `meshIndex`, reusing a free-listed handle when available. Returns
// the handle value. Caller is expected to hold m_mutex.
uint32_t Material::add_instance_index(uint32_t meshIndex) {
	uint32_t result = INVALID_HANDLE_VALUE;
	if (m_freeList == INVALID_HANDLE_VALUE) {
		// No recycled handles: grow the sparse array by one.
		result = static_cast<uint32_t>(m_instanceIndexSparse.size());
		m_instanceIndexBuffer.emplace_back(InstanceIndices{
			.objectIndex = result,
			.meshIndex = meshIndex,
		});
		m_instanceIndexSparse.emplace_back(static_cast<uint32_t>(m_instanceIndexBuffer.size()
				- 1));
	}
	else {
		// Pop the free-list head; a free slot's sparse entry stores the next
		// free handle value.
		result = m_freeList;
		m_instanceIndexBuffer.emplace_back(InstanceIndices{
			.objectIndex = m_freeList,
			.meshIndex = meshIndex,
		});
		m_freeList = m_instanceIndexSparse[m_freeList];
		m_instanceIndexSparse[result] = static_cast<uint32_t>(m_instanceIndexBuffer.size() - 1);
	}
	// Grow the GPU-side output buffer when the instance count outpaces it.
	// FIX: size by sizeof(InstanceIndices) to match the element size the
	// constructor allocates with; the previous sizeof(uint32_t) undercounted
	// and could leave the output buffer too small.
	if (m_instanceIndexOutputBuffer->get_size() < m_instanceIndexBuffer.size()
			* sizeof(InstanceIndices)) {
		m_instanceIndexOutputBuffer = GFX::Buffer::create(m_instanceIndexBuffer.size()
				* sizeof(InstanceIndices), VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
				VMA_MEMORY_USAGE_GPU_ONLY);
	}
	return result;
}
// Repoints an existing instance at a different mesh (e.g. for LOD swaps).
void Material::update_instance_mesh(InstanceHandle handle, Mesh& mesh) {
ScopedLock lock(*m_mutex);
auto& indices = m_instanceIndexBuffer[m_instanceIndexSparse[handle.value]];
indices.meshIndex = mesh.get_mesh_index();
}
MeshPool& Material::get_mesh_pool() const {
return m_meshPool;
}
// CPU-written buffer of dense InstanceIndices; consumed by the cull pass.
Buffer& Material::get_instance_index_buffer() const {
ScopedLock lock(*m_mutex);
return m_instanceIndexBuffer.buffer();
}
// GPU-only buffer the cull pass writes its surviving instances into.
Buffer& Material::get_instance_index_output_buffer() const {
ScopedLock lock(*m_mutex);
return *m_instanceIndexOutputBuffer;
}
uint32_t Material::get_instance_count() const {
ScopedLock lock(*m_mutex);
return static_cast<uint32_t>(m_instanceIndexBuffer.size());
}
// Shared draw path for subclasses: binds the pool's vertex/index buffers and
// issues one indirect draw per mesh from this material's section of the
// pool's GPU indirect buffer. No-op when the material has no instances.
void Material::render_internal(CommandBuffer& cmd) {
if (m_instanceIndexBuffer.empty()) {
return;
}
VkDeviceSize offset{};
VkBuffer vertexBuffer = m_meshPool.get_vertex_buffer();
VkBuffer indexBuffer = m_meshPool.get_index_buffer();
cmd.bind_vertex_buffers(0, 1, &vertexBuffer, &offset);
cmd.bind_index_buffer(indexBuffer, 0, m_meshPool.get_index_type());
cmd.draw_indexed_indirect(m_meshPool.get_gpu_indirect_buffer(),
m_meshPool.get_indirect_buffer_offset(m_meshMaterialIndex),
m_meshPool.get_mesh_count(), sizeof(VkDrawIndexedIndirectCommand));
}
|
whupdup/frame
|
real/graphics/material.cpp
|
C++
|
gpl-3.0
| 3,426
|
#pragma once
#include <core/scoped_lock.hpp>
#include <graphics/buffer_vector.hpp>
#include <graphics/mesh_pool.hpp>
#include <scheduler/task_scheduler.hpp>
namespace ZN::GFX {
class CommandBuffer;
struct InstanceHandle {
uint32_t value;
};
struct InstanceIndices {
uint32_t objectIndex;
uint32_t meshIndex;
};
// Abstract base for renderable materials. Maintains a sparse/dense handle
// scheme mapping stable InstanceHandles to a dense, GPU-visible buffer of
// InstanceIndices, plus a GPU-only output buffer for the culling pass. All
// live materials are tracked in a global, mutex-guarded list.
class Material {
public:
static constexpr const size_t INSTANCE_BUFFER_INITIAL_CAPACITY = 16384ull;
static constexpr const uint32_t INVALID_HANDLE_VALUE = ~0u;
virtual ~Material();
NULL_COPY_AND_ASSIGN(Material);
// GPU culling and the depth-only / forward draw passes, implemented per
// concrete material.
virtual void indirect_cull(CommandBuffer&) = 0;
virtual void render_depth_only(CommandBuffer&, VkDescriptorSet globalDescriptor) = 0;
virtual void render_forward(CommandBuffer&, VkDescriptorSet globalDescriptor,
VkDescriptorSet aoDescriptor) = 0;
virtual Buffer& get_instance_buffer() const = 0;
void remove_instance(InstanceHandle);
void update_instance_mesh(InstanceHandle, Mesh&);
MeshPool& get_mesh_pool() const;
Buffer& get_instance_index_buffer() const;
Buffer& get_instance_index_output_buffer() const;
uint32_t get_instance_count() const;
// Invokes `func` on every live material while holding the list mutex.
template <typename Functor>
static void for_each_material(Functor&& func) {
ScopedLock lock(*s_materialListMutex);
for (auto* pMaterial : s_materials) {
func(*pMaterial);
}
}
protected:
void render_internal(CommandBuffer&);
private:
using InstanceIndexBuffer = BufferVector<InstanceIndices,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, INSTANCE_BUFFER_INITIAL_CAPACITY,
VMA_MEMORY_USAGE_CPU_TO_GPU>;
static inline std::vector<Material*> s_materials;
static inline IntrusivePtr<Scheduler::Mutex> s_materialListMutex
= Scheduler::Mutex::create();
MeshPool& m_meshPool;
// Dense array of per-instance indices (CPU-written, GPU-read).
InstanceIndexBuffer m_instanceIndexBuffer;
// GPU-only destination the cull pass compacts surviving instances into.
IntrusivePtr<Buffer> m_instanceIndexOutputBuffer;
// handle -> dense index map; free slots chain the free list (see
// add_instance_index).
std::vector<uint32_t> m_instanceIndexSparse;
uint32_t m_meshMaterialIndex;
uint32_t m_freeList = INVALID_HANDLE_VALUE;
explicit Material(MeshPool&);
uint32_t add_instance_index(uint32_t meshIndex);
template <typename Derived, typename InstanceType>
friend class MaterialImpl;
protected:
IntrusivePtr<Scheduler::Mutex> m_mutex = Scheduler::Mutex::create();
};
// CRTP helper adding a typed per-instance data buffer on top of Material's
// handle bookkeeping.
template <typename Derived, typename InstanceType>
class MaterialImpl : public Material {
public:
// Creates an instance of `mesh`, constructing its InstanceType in place.
// NOTE(review): get_instance indexes m_instanceBuffer by handle.value, but
// instances are appended densely here — after removals the handle value and
// the dense position can diverge; confirm the intended indexing scheme.
template <typename... Args>
InstanceHandle add_instance(Mesh& mesh, Args&&... args) {
auto meshIndex = mesh.get_mesh_index();
ScopedLock lock(*m_mutex);
auto objectIndex = add_instance_index(meshIndex);
m_instanceBuffer.emplace_back(std::forward<Args>(args)...);
return {objectIndex};
}
InstanceType& get_instance(InstanceHandle handle) {
return m_instanceBuffer[handle.value];
}
Buffer& get_instance_buffer() const override {
return m_instanceBuffer.buffer();
}
protected:
explicit MaterialImpl(MeshPool& pool)
: Material(pool) {}
private:
using InstanceBuffer = BufferVector<InstanceType, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
INSTANCE_BUFFER_INITIAL_CAPACITY, VMA_MEMORY_USAGE_CPU_TO_GPU>;
InstanceBuffer m_instanceBuffer;
};
}
|
whupdup/frame
|
real/graphics/material.hpp
|
C++
|
gpl-3.0
| 3,091
|
#include "mesh_pool.hpp"
#include <core/scoped_lock.hpp>
using namespace ZN;
using namespace ZN::GFX;
// MeshPool
// Creates the initial GPU-side indirect command buffer (grown later by
// ensure_command_buffer_capacity as meshes/materials are registered). Usage
// flags: transfer destination (zero-fill copy), indirect draw source, and
// storage (written by GPU-side draw generation).
MeshPool::MeshPool()
	: m_gpuIndirectCommandBuffer(Buffer::create(INDIRECT_COMMAND_BUFFER_INITIAL_CAPACITY
			* sizeof(VkDrawIndexedIndirectCommand), VK_BUFFER_USAGE_TRANSFER_DST_BIT
			| VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
			VMA_MEMORY_USAGE_GPU_ONLY))
	, m_mutex(Scheduler::Mutex::create()) {}
// Creates a Mesh handle for an already-allocated [vertex, index] range and
// appends a zeroed indirect command for it to every registered material's run.
// NOTE(review): m_mutex is not taken here — the visible callers
// (OwningMeshPool::create_mesh_internal, NonOwningMeshPool::create_mesh) hold
// it before calling; confirm that holds for all call sites.
IntrusivePtr<Mesh> MeshPool::register_mesh(uint32_t vertexOffset, uint32_t indexOffset,
		uint32_t indexCount) {
	IntrusivePtr result(new Mesh(*this, vertexOffset, indexOffset, indexCount,
			static_cast<uint32_t>(m_meshes.size())));
	if (m_materialCount > 0) {
		// The zeroed command buffer is material-major: one contiguous run of
		// m_meshes.size() commands per material. Walking i from
		// m_materialCount - 1 down to 0 and inserting at
		// end() - i * m_meshes.size() lands each insert exactly at the end of
		// material (m_materialCount - 1 - i)'s run; the elements inserted by
		// earlier iterations are accounted for by the shrinking i.
		for (uint32_t i = m_materialCount - 1;; --i) {
			m_zeroedIndirectCommandBuffer.insert(m_zeroedIndirectCommandBuffer.end()
					- i * m_meshes.size(), {
				.indexCount = indexCount,
				.instanceCount = 0,
				.firstIndex = indexOffset,
				.vertexOffset = static_cast<int32_t>(vertexOffset),
				.firstInstance = 0,
			});
			// i is unsigned, so terminate explicitly at zero (i >= 0 would
			// always be true).
			if (i == 0) {
				break;
			}
		}
		ensure_command_buffer_capacity();
	}
	m_meshes.emplace_back(result.get());
	return result;
}
uint32_t MeshPool::register_material() {
ScopedLock lock(*m_mutex);
for (auto* mesh : m_meshes) {
m_zeroedIndirectCommandBuffer.push_back({
.indexCount = mesh->get_index_count(),
.instanceCount = 0,
.firstIndex = mesh->get_index_offset(),
.vertexOffset = static_cast<int32_t>(mesh->get_vertex_offset()),
.firstInstance = 0,
});
}
ensure_command_buffer_capacity();
return m_materialCount++;
}
// Per-mesh accessors. Each takes the pool lock because m_meshes may be
// appended to concurrently by register_mesh callers.
uint32_t MeshPool::get_mesh_vertex_offset(uint32_t meshIndex) const {
	ScopedLock lock(*m_mutex);
	return m_meshes[meshIndex]->get_vertex_offset();
}
uint32_t MeshPool::get_mesh_index_offset(uint32_t meshIndex) const {
	ScopedLock lock(*m_mutex);
	return m_meshes[meshIndex]->get_index_offset();
}
uint32_t MeshPool::get_mesh_index_count(uint32_t meshIndex) const {
	ScopedLock lock(*m_mutex);
	return m_meshes[meshIndex]->get_index_count();
}
uint32_t MeshPool::get_mesh_count() const {
	ScopedLock lock(*m_mutex);
	return static_cast<uint32_t>(m_meshes.size());
}
// CPU-side template commands (instanceCount == 0) copied over the GPU buffer
// before draw generation.
Buffer& MeshPool::get_zeroed_indirect_buffer() const {
	ScopedLock lock(*m_mutex);
	return m_zeroedIndirectCommandBuffer.buffer();
}
Buffer& MeshPool::get_gpu_indirect_buffer() const {
	ScopedLock lock(*m_mutex);
	return *m_gpuIndirectCommandBuffer;
}
// Byte offset of materialIndex's command run within the material-major
// indirect buffers.
VkDeviceSize MeshPool::get_indirect_buffer_offset(uint32_t materialIndex) const {
	ScopedLock lock(*m_mutex);
	return static_cast<VkDeviceSize>(materialIndex) * m_meshes.size()
			* sizeof(VkDrawIndexedIndirectCommand);
}
void MeshPool::ensure_command_buffer_capacity() {
if (m_zeroedIndirectCommandBuffer.byte_capacity() > m_gpuIndirectCommandBuffer->get_size()) {
m_gpuIndirectCommandBuffer = Buffer::create(m_zeroedIndirectCommandBuffer.byte_capacity(),
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT,
VMA_MEMORY_USAGE_GPU_ONLY);
}
}
// Mesh
// Lightweight ref-counted view over a [vertex, index] range inside a MeshPool.
Mesh::Mesh(MeshPool& pool, uint32_t vertexOffset, uint32_t indexOffset, uint32_t indexCount,
		uint32_t meshIndex)
	: m_vertexOffset(vertexOffset)
	, m_indexOffset(indexOffset)
	, m_indexCount(indexCount)
	, m_meshIndex(meshIndex)
	, m_pool(pool) {}
// Last reference dropped: hand the range back to the owning pool.
Mesh::~Mesh() {
	m_pool.free_mesh(*this);
}
uint32_t Mesh::get_mesh_index() const {
	return m_meshIndex;
}
uint32_t Mesh::get_vertex_offset() const {
	return m_vertexOffset;
}
uint32_t Mesh::get_index_offset() const {
	return m_indexOffset;
}
uint32_t Mesh::get_index_count() const {
	return m_indexCount;
}
|
whupdup/frame
|
real/graphics/mesh_pool.cpp
|
C++
|
gpl-3.0
| 3,584
|
#pragma once
#include <graphics/buffer_vector.hpp>
#include <scheduler/task_scheduler.hpp>
#include <vector>
namespace ZN::GFX {
constexpr const size_t INDIRECT_COMMAND_BUFFER_INITIAL_CAPACITY = 128ull;
class MeshPool;
// Ref-counted, non-copyable handle to a [vertex, index] range inside a
// MeshPool. Destroying the last reference returns the range to the pool.
class Mesh final : public Memory::ThreadSafeIntrusivePtrEnabled<Mesh> {
	public:
		explicit Mesh(MeshPool& pool, uint32_t vertexOffset, uint32_t indexOffset,
				uint32_t indexCount, uint32_t meshIndex);
		~Mesh();
		NULL_COPY_AND_ASSIGN(Mesh);
		// Index of this mesh within its pool's mesh list.
		uint32_t get_mesh_index() const;
		uint32_t get_vertex_offset() const;
		uint32_t get_index_offset() const;
		uint32_t get_index_count() const;
	private:
		uint32_t m_vertexOffset;
		uint32_t m_indexOffset;
		uint32_t m_indexCount;
		uint32_t m_meshIndex;
		// Must outlive the mesh: the destructor calls back into it.
		MeshPool& m_pool;
		friend MeshPool;
};
// Base class managing a material-major buffer of indirect draw commands:
// one contiguous run of get_mesh_count() VkDrawIndexedIndirectCommands per
// registered material. Derived classes own (or borrow) the actual geometry.
class MeshPool {
	public:
		NULL_COPY_AND_ASSIGN(MeshPool);
		// MeshPool is used polymorphically (OwningMeshPool, NonOwningMeshPool);
		// a virtual destructor makes deletion through a MeshPool* well-defined.
		virtual ~MeshPool() = default;
		// Appends one zeroed indirect command per existing mesh and returns the
		// new material's index.
		uint32_t register_material();
		virtual void free_mesh(Mesh&) = 0;
		virtual VkIndexType get_index_type() const = 0;
		uint32_t get_mesh_vertex_offset(uint32_t meshIndex) const;
		uint32_t get_mesh_index_offset(uint32_t meshIndex) const;
		uint32_t get_mesh_index_count(uint32_t meshIndex) const;
		virtual Buffer& get_vertex_buffer() const = 0;
		virtual Buffer& get_index_buffer() const = 0;
		virtual uint32_t get_index_count() const = 0;
		virtual uint32_t get_vertex_count() const = 0;
		uint32_t get_mesh_count() const;
		// CPU-side template of per-material indirect commands (instanceCount 0).
		Buffer& get_zeroed_indirect_buffer() const;
		// GPU-side copy that draw generation writes into.
		Buffer& get_gpu_indirect_buffer() const;
		// Byte offset of materialIndex's command run in the indirect buffers.
		VkDeviceSize get_indirect_buffer_offset(uint32_t materialIndex) const;
	protected:
		explicit MeshPool();
		// Caller must hold m_mutex (see the .cpp).
		Memory::IntrusivePtr<Mesh> register_mesh(uint32_t vertexOffset, uint32_t indexOffset,
				uint32_t indexCount);
	private:
		using IndirectCommandInputBuffer = BufferVector<VkDrawIndexedIndirectCommand,
				VK_BUFFER_USAGE_TRANSFER_SRC_BIT, INDIRECT_COMMAND_BUFFER_INITIAL_CAPACITY,
				VMA_MEMORY_USAGE_CPU_TO_GPU>;
		std::vector<Mesh*> m_meshes;
		IndirectCommandInputBuffer m_zeroedIndirectCommandBuffer;
		Memory::IntrusivePtr<Buffer> m_gpuIndirectCommandBuffer;
		// Fixed: previously uninitialized (the constructor's init list never set
		// it), yet register_mesh() reads it before any register_material() call.
		uint32_t m_materialCount = 0;
		void ensure_command_buffer_capacity();
	protected:
		Memory::IntrusivePtr<Scheduler::Mutex> m_mutex;
};
}
|
whupdup/frame
|
real/graphics/mesh_pool.hpp
|
C++
|
gpl-3.0
| 2,178
|
#include "non_owning_mesh_pool.hpp"
#include <core/scoped_lock.hpp>
using namespace ZN;
using namespace ZN::GFX;
// NonOwningMeshPool
// Thin view over `parentPool`; stores a non-owning pointer, so the parent pool
// must outlive this object.
NonOwningMeshPool::NonOwningMeshPool(MeshPool& parentPool)
	: m_parentPool(&parentPool) {}
// Registers a sub-range view of `parentMesh`: same vertex base, index range
// [parentMesh.index_offset + indexOffset, +indexCount).
IntrusivePtr<Mesh> NonOwningMeshPool::create_mesh(Mesh& parentMesh, uint32_t indexCount,
		uint32_t indexOffset) {
	// FIXME: hold a reference to parentMesh to keep lifetimes sane
	ScopedLock lock(*m_mutex);
	return register_mesh(parentMesh.get_vertex_offset(), parentMesh.get_index_offset()
			+ indexOffset, indexCount);
}
// Intentionally a no-op: this pool does not own the underlying geometry; the
// parent pool is responsible for reclaiming storage.
void NonOwningMeshPool::free_mesh(Mesh&) {
}
// The geometry buffers and counts belong to the parent pool; forward all
// accessors straight through.
Buffer& NonOwningMeshPool::get_vertex_buffer() const {
	return m_parentPool->get_vertex_buffer();
}
Buffer& NonOwningMeshPool::get_index_buffer() const {
	return m_parentPool->get_index_buffer();
}
uint32_t NonOwningMeshPool::get_index_count() const {
	return m_parentPool->get_index_count();
}
uint32_t NonOwningMeshPool::get_vertex_count() const {
	return m_parentPool->get_vertex_count();
}
|
whupdup/frame
|
real/graphics/non_owning_mesh_pool.cpp
|
C++
|
gpl-3.0
| 1,001
|
#pragma once
#include <graphics/mesh_pool.hpp>
namespace ZN::GFX {
class NonOwningMeshPool : public MeshPool {
public:
explicit NonOwningMeshPool(MeshPool&);
[[nodiscard]] Memory::IntrusivePtr<Mesh> create_mesh(Mesh& parentMesh, uint32_t indexCount,
uint32_t indexOffset);
void free_mesh(Mesh&) override;
Buffer& get_vertex_buffer() const override;
Buffer& get_index_buffer() const override;
uint32_t get_index_count() const override;
uint32_t get_vertex_count() const override;
private:
MeshPool* m_parentPool;
};
}
|
whupdup/frame
|
real/graphics/non_owning_mesh_pool.hpp
|
C++
|
gpl-3.0
| 547
|
#include "owning_mesh_pool.hpp"
#include <core/scoped_lock.hpp>
#include <graphics/render_context.hpp>
#include <graphics/vk_initializers.hpp>
#include <Tracy.hpp>
using namespace ZN;
using namespace ZN::GFX;
static constexpr const size_t INITIAL_CAPACITY_BYTES = 16384;
// OwningMeshPool
// Allocates the initial GPU-only vertex/index buffers. Both carry TRANSFER_SRC
// as well as TRANSFER_DST so their contents can be migrated into a larger
// buffer when the pool grows (see ensure_*_capacity).
OwningMeshPool::OwningMeshPool()
	: m_vertexBuffer(Buffer::create(INITIAL_CAPACITY_BYTES,
			VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT
			| VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_GPU_ONLY))
	, m_indexBuffer(Buffer::create(INITIAL_CAPACITY_BYTES,
			VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT
			| VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_GPU_ONLY))
	, m_vertexCount(0)
	, m_indexCount(0) {}
// Flushes all meshes queued by upload_mesh_internal: grows the pooled
// vertex/index buffers once if needed, then schedules one copy per queued mesh
// through the shared upload context.
void OwningMeshPool::update() {
	ZoneScopedN("OwningMeshPool Update");
	ScopedLock lock(*m_mutex);
	if (m_queuedBuffers.empty()) {
		return;
	}
	// Sum incoming byte sizes so capacity is grown at most once per call.
	size_t totalVertexSize = 0;
	size_t totalIndexSize = 0;
	for (auto& qbi : m_queuedBuffers) {
		totalVertexSize += qbi.vertexCopy.size;
		totalIndexSize += qbi.indexCopy.size;
	}
	// FIXME: Note that when the pool will have freed holes in it, an effort must be made to ensure
	// that empty regions are not copied, as they may race with queued buffers
	ensure_vertex_capacity(m_vertexBytesUsed + totalVertexSize);
	ensure_index_capacity(m_indexBytesUsed + totalIndexSize);
	m_vertexBytesUsed += totalVertexSize;
	m_indexBytesUsed += totalIndexSize;
	auto& uploadCtx = g_renderContext->get_upload_context();
	for (auto& qbi : m_queuedBuffers) {
		uploadCtx.upload(*qbi.vertexBuffer, *m_vertexBuffer, qbi.vertexCopy.srcOffset,
				qbi.vertexCopy.dstOffset, qbi.vertexCopy.size);
		uploadCtx.upload(*qbi.indexBuffer, *m_indexBuffer, qbi.indexCopy.srcOffset,
				qbi.indexCopy.dstOffset, qbi.indexCopy.size);
	}
	// Drops the Mesh/Buffer references that were held for the copies.
	m_queuedBuffers.clear();
}
// Currently leaks the range by design: the pool only ever grows at the tail
// (see create_mesh_internal) and never reclaims freed regions.
void OwningMeshPool::free_mesh(Mesh&) {
	// FIXME: handle freeing meshes
}
// Reserves a [vertex, index] range at the current tail of the pool and
// registers a mesh covering it. The actual data copy is queued separately via
// upload_mesh_internal.
IntrusivePtr<Mesh> OwningMeshPool::create_mesh_internal(uint32_t vertexCount,
		uint32_t indexCount) {
	ScopedLock guard(*m_mutex);
	auto result = register_mesh(m_vertexCount, m_indexCount, indexCount);
	m_vertexCount += vertexCount;
	m_indexCount += indexCount;
	return result;
}
// Queues the copy of a mesh's source data into this pool's pooled buffers; the
// actual transfer happens in update(). References to the mesh and both source
// buffers are retained in the queue entry until then.
void OwningMeshPool::upload_mesh_internal(Mesh& mesh, Buffer& srcVertexBuffer,
		Buffer& srcIndexBuffer, VkDeviceSize srcVertexOffset, VkDeviceSize srcIndexOffset,
		VkDeviceSize vertexCount, VkDeviceSize vertexSizeBytes, VkDeviceSize indexSizeBytes) {
	ScopedLock lock(*m_mutex);
	// Mesh offsets/counts are stored in elements; convert to bytes using the
	// caller-supplied per-element sizes.
	auto dstVertexOffset = static_cast<VkDeviceSize>(mesh.get_vertex_offset()) * vertexSizeBytes;
	auto dstIndexOffset = static_cast<VkDeviceSize>(mesh.get_index_offset()) * indexSizeBytes;
	auto vertexCopySize = vertexCount * vertexSizeBytes;
	auto indexCopySize = static_cast<VkDeviceSize>(mesh.get_index_count()) * indexSizeBytes;
	m_queuedBuffers.emplace_back(QueuedBufferInfo{
		.handle = mesh.reference_from_this(),
		.vertexBuffer = srcVertexBuffer.reference_from_this(),
		.indexBuffer = srcIndexBuffer.reference_from_this(),
		.vertexCopy = {
			.srcOffset = srcVertexOffset,
			.dstOffset = dstVertexOffset,
			.size = vertexCopySize
		},
		.indexCopy = {
			.srcOffset = srcIndexOffset,
			.dstOffset = dstIndexOffset,
			.size = indexCopySize
		},
	});
}
// Accessors take the pool lock since the buffers are swapped out when the pool
// grows (see ensure_*_capacity).
Buffer& OwningMeshPool::get_vertex_buffer() const {
	ScopedLock lock(*m_mutex);
	return *m_vertexBuffer;
}
Buffer& OwningMeshPool::get_index_buffer() const {
	ScopedLock lock(*m_mutex);
	return *m_indexBuffer;
}
uint32_t OwningMeshPool::get_index_count() const {
	ScopedLock lock(*m_mutex);
	return m_indexCount;
}
uint32_t OwningMeshPool::get_vertex_count() const {
	ScopedLock lock(*m_mutex);
	return m_vertexCount;
}
void OwningMeshPool::ensure_vertex_capacity(size_t desiredSize) {
if (desiredSize > m_vertexBuffer->get_size()) {
auto newCapacity = m_vertexBuffer->get_size() * 2;
while (newCapacity < desiredSize) {
newCapacity *= 2;
}
auto newBuffer = Buffer::create(newCapacity, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT
| VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VMA_MEMORY_USAGE_GPU_ONLY);
if (m_vertexBytesUsed > 0) {
g_renderContext->get_upload_context().upload(*m_vertexBuffer, *newBuffer,
0, 0, m_vertexBytesUsed);
}
m_vertexBuffer = std::move(newBuffer);
}
}
void OwningMeshPool::ensure_index_capacity(size_t desiredSize) {
if (desiredSize > m_indexBuffer->get_size()) {
auto newCapacity = m_indexBuffer->get_size() * 2;
while (newCapacity < desiredSize) {
newCapacity *= 2;
}
auto newBuffer = Buffer::create(newCapacity, VK_BUFFER_USAGE_INDEX_BUFFER_BIT
| VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VMA_MEMORY_USAGE_GPU_ONLY);
if (m_indexBytesUsed > 0) {
g_renderContext->get_upload_context().upload(*m_indexBuffer, *newBuffer, 0, 0,
m_indexBytesUsed);
}
m_indexBuffer = std::move(newBuffer);
}
}
|
whupdup/frame
|
real/graphics/owning_mesh_pool.cpp
|
C++
|
gpl-3.0
| 4,904
|
#pragma once
#include <graphics/mesh_pool.hpp>
namespace ZN::GFX {
class CommandBuffer;
class FrameGraphPass;
class OwningMeshPool : public MeshPool {
public:
void update();
void free_mesh(Mesh&) override;
Buffer& get_vertex_buffer() const override;
Buffer& get_index_buffer() const override;
uint32_t get_index_count() const override;
uint32_t get_vertex_count() const override;
protected:
explicit OwningMeshPool();
[[nodiscard]] Memory::IntrusivePtr<Mesh> create_mesh_internal(uint32_t vertexCount,
uint32_t indexCount);
void upload_mesh_internal(Mesh&, Buffer& srcVertexBuffer, Buffer& srcIndexBuffer,
VkDeviceSize srcVertexOffset, VkDeviceSize srcIndexOffset,
VkDeviceSize vertexCount, VkDeviceSize vertexSizeBytes,
VkDeviceSize indexSizeBytes);
private:
struct QueuedBufferInfo {
Memory::IntrusivePtr<Mesh> handle;
Memory::IntrusivePtr<Buffer> vertexBuffer;
Memory::IntrusivePtr<Buffer> indexBuffer;
VkBufferCopy vertexCopy;
VkBufferCopy indexCopy;
};
std::vector<QueuedBufferInfo> m_queuedBuffers;
Memory::IntrusivePtr<Buffer> m_vertexBuffer;
Memory::IntrusivePtr<Buffer> m_indexBuffer;
size_t m_vertexBytesUsed = {};
size_t m_indexBytesUsed = {};
uint32_t m_vertexCount = {};
uint32_t m_indexCount = {};
// std::vector<Pair<uint32_t, uint32_t>> m_allocatedRanges;
// std::vector<uint32_t> m_freeOffsets;
void reserve_internal(uint32_t vertexCount, uint32_t indexCount, uint32_t vertexSizeBytes,
VkIndexType);
void ensure_vertex_capacity(size_t desiredSize);
void ensure_index_capacity(size_t desiredSize);
};
}
|
whupdup/frame
|
real/graphics/owning_mesh_pool.hpp
|
C++
|
gpl-3.0
| 1,616
|
#include "pipeline_layout.hpp"
#include <graphics/vk_common.hpp>
#include <algorithm>
#include <cassert>
#include <cstring>
using namespace ZN;
using namespace ZN::GFX;
PipelineLayoutCache::PipelineLayoutCache(VkDevice device)
	: m_device(device) {}
// Destroys every cached pipeline layout; the device must still be valid here.
PipelineLayoutCache::~PipelineLayoutCache() {
	for (auto& [_, layout] : m_layouts) {
		vkDestroyPipelineLayout(m_device, layout, nullptr);
	}
}
// Returns a cached VkPipelineLayout for `createInfo`, creating one on first
// use. The cache key is a fixed-capacity copy of the creation parameters.
VkPipelineLayout PipelineLayoutCache::get(const VkPipelineLayoutCreateInfo& createInfo) {
	assert(createInfo.setLayoutCount <= 8 && "Need to increase amount of stored layouts");
	assert(createInfo.pushConstantRangeCount <= 2 && "Need to increase amount of constant ranges");
	// Zero-initialize so unused slots compare and hash consistently.
	LayoutInfo layoutInfo{};
	layoutInfo.numSetLayouts = createInfo.setLayoutCount;
	layoutInfo.numRanges = createInfo.pushConstantRangeCount;
	// Guard both copies: memcpy from a null pointer is UB even with size 0.
	if (createInfo.setLayoutCount > 0) {
		memcpy(layoutInfo.setLayouts, createInfo.pSetLayouts,
				createInfo.setLayoutCount * sizeof(VkDescriptorSetLayout));
	}
	if (createInfo.pushConstantRangeCount > 0) {
		memcpy(layoutInfo.constantRanges, createInfo.pPushConstantRanges,
				createInfo.pushConstantRangeCount * sizeof(VkPushConstantRange));
	}
	// Deliberately do NOT sort the set layouts: set order defines the set
	// indices of a VkPipelineLayout and is semantically significant. The old
	// sort made permutations of the same layouts alias a single cache entry,
	// returning a layout created with a different set order.
	if (auto it = m_layouts.find(layoutInfo); it != m_layouts.end()) {
		return it->second;
	}
	VkPipelineLayout layout;
	VK_CHECK(vkCreatePipelineLayout(m_device, &createInfo, nullptr, &layout));
	m_layouts.emplace(layoutInfo, layout);
	return layout;
}
// LayoutInfo
// Two keys match when both counts agree and every used slot is identical;
// slots beyond the counts are ignored.
bool PipelineLayoutCache::LayoutInfo::operator==(const LayoutInfo& other) const {
	if (numSetLayouts != other.numSetLayouts || numRanges != other.numRanges) {
		return false;
	}
	if (!std::equal(setLayouts, setLayouts + numSetLayouts, other.setLayouts)) {
		return false;
	}
	for (uint32_t i = 0; i < numRanges; ++i) {
		const auto& lhs = constantRanges[i];
		const auto& rhs = other.constantRanges[i];
		if (lhs.stageFlags != rhs.stageFlags || lhs.offset != rhs.offset
				|| lhs.size != rhs.size) {
			return false;
		}
	}
	return true;
}
// Order-sensitive hash over the used slots only; must stay consistent with
// operator==. All shift amounts stay below 64 because numSetLayouts <= 8 and
// numRanges <= 2 (asserted in get()).
size_t PipelineLayoutCache::LayoutInfo::hash() const {
	size_t result = std::hash<uint32_t>{}(numSetLayouts);
	for (uint32_t i = 0; i < numSetLayouts; ++i) {
		result ^= std::hash<size_t>{}(reinterpret_cast<size_t>(setLayouts[i]) << (4 * i));
	}
	for (uint32_t i = 0; i < numRanges; ++i) {
		// Pack flags/size/offset into one word; fields wider than their shift
		// windows may overlap, which only weakens distribution, not correctness.
		size_t value = constantRanges[i].stageFlags | (constantRanges[i].size << 8)
				| (constantRanges[i].offset << 16);
		result ^= std::hash<size_t>{}(value << (32 * i));
	}
	return result;
}
|
whupdup/frame
|
real/graphics/pipeline_layout.cpp
|
C++
|
gpl-3.0
| 2,643
|
#pragma once
#include <core/common.hpp>
#include <volk.h>
#include <unordered_map>
namespace ZN::GFX {
// Deduplicates VkPipelineLayout objects by their creation parameters. Owns the
// cached layouts and destroys them with the cache.
class PipelineLayoutCache final {
	public:
		explicit PipelineLayoutCache(VkDevice);
		~PipelineLayoutCache();
		NULL_COPY_AND_ASSIGN(PipelineLayoutCache);
		// Cache key: fixed-capacity copy of the creation parameters. The
		// capacities (8 set layouts, 2 push-constant ranges) are asserted in get().
		struct LayoutInfo {
			VkDescriptorSetLayout setLayouts[8];
			VkPushConstantRange constantRanges[2];
			uint32_t numSetLayouts;
			uint32_t numRanges;
			bool operator==(const LayoutInfo&) const;
			size_t hash() const;
		};
		// Returns the cached layout for createInfo, creating it on first use.
		// NOTE(review): not internally synchronized — confirm callers serialize.
		VkPipelineLayout get(const VkPipelineLayoutCreateInfo&);
	private:
		struct LayoutInfoHash {
			size_t operator()(const LayoutInfo& info) const {
				return info.hash();
			}
		};
		std::unordered_map<LayoutInfo, VkPipelineLayout, LayoutInfoHash> m_layouts;
		VkDevice m_device; // not owned
};
}
|
whupdup/frame
|
real/graphics/pipeline_layout.hpp
|
C++
|
gpl-3.0
| 785
|
#include "queue.hpp"
#include <core/scoped_lock.hpp>
#include <graphics/fence.hpp>
#include <graphics/vk_common.hpp>
using namespace ZN;
using namespace ZN::GFX;
Queue::Queue(VkQueue queue)
	: m_queue(queue)
	, m_mutex(Scheduler::Mutex::create()) {}
// Serialized submission: access to a VkQueue must be externally synchronized,
// which the per-queue mutex provides.
void Queue::submit(const VkSubmitInfo& submitInfo, Fence* fence) {
	ScopedLock lock(*m_mutex);
	VK_CHECK(vkQueueSubmit(m_queue, 1, &submitInfo, fence ? fence->get_fence() : VK_NULL_HANDLE));
}
// Presents under the same lock; returns the raw result so callers can react to
// VK_ERROR_OUT_OF_DATE_KHR / VK_SUBOPTIMAL_KHR themselves.
VkResult Queue::present(const VkPresentInfoKHR& presentInfo) {
	ScopedLock lock(*m_mutex);
	return vkQueuePresentKHR(m_queue, &presentInfo);
}
VkQueue Queue::get_queue() const {
	return m_queue;
}
Queue::operator VkQueue() const {
	return m_queue;
}
|
whupdup/frame
|
real/graphics/queue.cpp
|
C++
|
gpl-3.0
| 704
|
#pragma once
#include <scheduler/task_scheduler.hpp>
#include <volk.h>
namespace ZN::GFX {
class Fence;
class RenderContext;
// Mutex-guarded wrapper around a VkQueue: serializes submit/present, which the
// Vulkan spec requires to be externally synchronized per queue.
class Queue final {
	public:
		explicit Queue(VkQueue);
		NULL_COPY_AND_ASSIGN(Queue);
		void submit(const VkSubmitInfo&, Fence*);
		VkResult present(const VkPresentInfoKHR&);
		VkQueue get_queue() const;
		operator VkQueue() const;
	private:
		VkQueue m_queue; // not owned; queues live with the device
		IntrusivePtr<Scheduler::Mutex> m_mutex;
};
}
|
whupdup/frame
|
real/graphics/queue.hpp
|
C++
|
gpl-3.0
| 445
|
#include "render_context.hpp"
#include <VkBootstrap.h>
#include <core/scoped_lock.hpp>
#include <graphics/vk_common.hpp>
#include <graphics/vk_initializers.hpp>
//#include <graphics/vk_profiler.hpp>
#include <services/application.hpp>
#include <TracyVulkan.hpp>
#include <TracyC.h>
using namespace ZN;
using namespace ZN::GFX;
// DELETION QUEUE
// True when no deletors are pending.
bool DeletionQueue::is_empty() const {
	return m_deletors.empty();
}
// Runs every queued deletor in reverse (LIFO) order, then empties the queue.
void DeletionQueue::flush() {
	auto it = m_deletors.rbegin();
	const auto last = m_deletors.rend();
	while (it != last) {
		(*it)();
		++it;
	}
	m_deletors.clear();
}
// CONSTRUCTORS/DESTRUCTORS
// Brings up the whole Vulkan stack in dependency order: instance/device,
// allocator, semaphore pool, swapchain, per-frame data, descriptors, uploads.
RenderContext::RenderContext(Window& windowIn)
	: m_validSwapchain(true)
	//, m_frames{}
	, m_frameCounter{}
	, m_window(windowIn)
	, m_swapchainMutex(Scheduler::Mutex::create()) {
	vulkan_init();
	allocator_init();
	m_semaphorePool.create();
	swapchain_init();
	frame_data_init();
	descriptors_init();
	m_uploadContext.create();
}
// Teardown order matters: drop swapchain wrappers, wait for the GPU to go
// idle, then destroy objects roughly in reverse construction order.
RenderContext::~RenderContext() {
	m_swapchainImages.clear();
	m_swapchainImageViews.clear();
	vkDeviceWaitIdle(m_device);
	m_profileCommandPool = {};
	m_uploadContext.destroy();
	m_textureRegistry.destroy();
	// Release per-frame GPU objects and run any deferred deletions they queued.
	for (size_t i = 0; i < FRAMES_IN_FLIGHT; ++i) {
		auto& frame = m_frames[i];
		frame.renderFence = nullptr;
		frame.mainCommandBuffer = nullptr;
		frame.uploadCommandBuffer = nullptr;
		frame.frameGraph.destroy();
		frame.deletionQueue.flush();
	}
	m_mainDeletionQueue.flush();
	// flush frames again for stuff put in by the main deletion queue
	for (size_t i = 0; i < FRAMES_IN_FLIGHT; ++i) {
		auto& frame = m_frames[i];
		frame.deletionQueue.flush();
	}
	for (auto& frame : m_frames) {
		vkDestroySemaphore(m_device, frame.renderSemaphore, nullptr);
		vkDestroySemaphore(m_device, frame.presentSemaphore, nullptr);
		vkDestroySemaphore(m_device, frame.transferSemaphore, nullptr);
		frame.descriptorAllocator.destroy();
	}
	m_semaphorePool.destroy();
	vkDestroySwapchainKHR(m_device, m_swapchain, nullptr);
	vmaDestroyAllocator(m_allocator);
	//g_vulkanProfiler.destroy();
	TracyVkDestroy(m_graphicsQueueContext);
	vkDestroySurfaceKHR(m_instance, m_surface, nullptr);
	vkDestroyDevice(m_device, nullptr);
	vkb::destroy_debug_utils_messenger(m_instance, m_debugMessenger);
	vkDestroyInstance(m_instance, nullptr);
}
// PUBLIC METHODS
// Second-stage init run after construction.
// NOTE(review): 128 is presumably the initial bindless texture capacity —
// confirm against TextureRegistry::create.
void RenderContext::late_init() {
	m_textureRegistry.create(128);
}
// Begins a frame: waits for this frame slot's previous GPU work, recycles its
// per-frame resources, and acquires the next swapchain image.
void RenderContext::frame_begin() {
	auto& frame = m_frames[get_frame_index()];
	{
		ZoneScopedN("RenderFenceWait");
		// Only after this wait is it safe to reuse the slot's resources.
		frame.renderFence->wait();
	}
	frame.frameGraph->clear();
	frame.deletionQueue.flush();
	frame.descriptorAllocator->reset_pools();
	m_textureRegistry->update();
	m_uploadContext->emit_commands(*frame.frameGraph);
	if (m_validSwapchain) {
		auto result = vkAcquireNextImageKHR(m_device, m_swapchain, UINT64_MAX,
				frame.presentSemaphore, VK_NULL_HANDLE, &frame.imageIndex);
		if (result == VK_ERROR_OUT_OF_DATE_KHR) {
			// Swapchain is stale (e.g. resize); skip rendering until recreated.
			m_validSwapchain = false;
		}
		else if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
			VK_CHECK(result);
		}
	}
	if (m_validSwapchain) {
		// Reset only when we will actually submit this frame — waiting on an
		// unsignaled fence next frame would deadlock otherwise.
		frame.renderFence->reset();
	}
	//g_vulkanProfiler->grab_queries(*frame.mainCommandBuffer);
}
// Ends a frame: records both command buffers from the frame graph, submits
// them (on one queue, or split across transfer + graphics), and presents.
void RenderContext::frame_end() {
	ZoneScopedN("Frame End");
	auto& frame = m_frames[get_frame_index()];
	frame.mainCommandBuffer->recording_begin();
	frame.uploadCommandBuffer->recording_begin();
	frame.frameGraph->build(*frame.mainCommandBuffer, *frame.uploadCommandBuffer);
	frame.uploadCommandBuffer->recording_end();
	frame.mainCommandBuffer->recording_end();
	if (m_graphicsQueueFamily != m_transferQueueFamily) {
		// Dedicated transfer queue: submit uploads first, signal the graphics
		// submission through transferSemaphore.
		// NOTE(review): if the swapchain is invalid below, this semaphore is
		// signaled but never waited on; the next frame would signal an
		// already-signaled semaphore — verify against validation layers.
		VkSubmitInfo submitInfo{
			.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
			.commandBufferCount = 1,
			.pCommandBuffers = frame.uploadCommandBuffer->get_buffers(),
			.signalSemaphoreCount = 1,
			.pSignalSemaphores = &frame.transferSemaphore
		};
		m_transferQueue->submit(submitInfo, nullptr);
	}
	if (m_validSwapchain) {
		if (m_graphicsQueueFamily != m_transferQueueFamily) {
			// Wait on both the acquired image and the transfer submission.
			VkPipelineStageFlags waitStages[] = {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
					VK_PIPELINE_STAGE_TRANSFER_BIT};
			VkSemaphore waitSemaphores[] = {frame.presentSemaphore, frame.transferSemaphore};
			VkSubmitInfo submitInfo{
				.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
				.pNext = nullptr,
				.waitSemaphoreCount = 2,
				.pWaitSemaphores = waitSemaphores,
				.pWaitDstStageMask = waitStages,
				.commandBufferCount = 1,
				.pCommandBuffers = frame.mainCommandBuffer->get_buffers(),
				.signalSemaphoreCount = 1,
				.pSignalSemaphores = &frame.renderSemaphore
			};
			ZoneScopedN("QueueSubmit");
			m_graphicsQueue->submit(submitInfo, frame.renderFence.get());
		}
		else {
			// Single queue: submit upload and main buffers together, in order.
			VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
			VkCommandBuffer commandBuffers[] = {*frame.uploadCommandBuffer,
					*frame.mainCommandBuffer};
			VkSubmitInfo submitInfo{
				.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
				.pNext = nullptr,
				.waitSemaphoreCount = 1,
				.pWaitSemaphores = &frame.presentSemaphore,
				.pWaitDstStageMask = &waitStage,
				.commandBufferCount = 2,
				.pCommandBuffers = commandBuffers,
				.signalSemaphoreCount = 1,
				.pSignalSemaphores = &frame.renderSemaphore
			};
			ZoneScopedN("QueueSubmit");
			m_graphicsQueue->submit(submitInfo, frame.renderFence.get());
		}
		VkPresentInfoKHR presentInfo{};
		presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
		presentInfo.waitSemaphoreCount = 1;
		presentInfo.pWaitSemaphores = &frame.renderSemaphore;
		presentInfo.swapchainCount = 1;
		presentInfo.pSwapchains = &m_swapchain;
		presentInfo.pImageIndices = &frame.imageIndex;
		{
			ZoneScopedN("QueuePresent");
			auto result = get_present_queue().present(presentInfo);
			if (result == VK_ERROR_OUT_OF_DATE_KHR) {
				m_validSwapchain = false;
			}
			else if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
				VK_CHECK(result);
			}
		}
	}
	++m_frameCounter;
}
// Cached creation: identical create infos return the same handle.
VkDescriptorSetLayout RenderContext::descriptor_set_layout_create(
		const VkDescriptorSetLayoutCreateInfo& createInfo) {
	return m_descriptorLayoutCache->get(createInfo);
}
VkPipelineLayout RenderContext::pipeline_layout_create(
		const VkPipelineLayoutCreateInfo& createInfo) {
	return m_pipelineLayoutCache->get(createInfo);
}
// Sets allocated here persist for the context's lifetime.
DescriptorBuilder RenderContext::global_descriptor_set_begin() {
	return DescriptorBuilder(*m_descriptorLayoutCache, *m_globalDescriptorAllocator);
}
// Sets allocated here are reset each frame (per-frame allocator pools).
DescriptorBuilder RenderContext::dynamic_descriptor_set_begin() {
	return DescriptorBuilder(*m_descriptorLayoutCache,
			*m_frames[get_frame_index()].descriptorAllocator);
}
DescriptorBuilder RenderContext::global_update_after_bind_descriptor_set_begin() {
	return DescriptorBuilder(*m_descriptorLayoutCache,
			*m_globalUpdateAfterBindDescriptorAllocator);
}
// Producer side of the ready-image queue: keeps a reference alive until a
// consumer dequeues it via get_ready_image.
void RenderContext::mark_image_ready(ImageView& imageView) {
	m_readyImages.enqueue(imageView.reference_from_this());
}
// Non-blocking; returns false when the queue is empty.
bool RenderContext::get_ready_image(IntrusivePtr<ImageView>& pImageView) {
	return m_readyImages.try_dequeue(pImageView);
}
// Rounds `originalSize` up to the device's minimum uniform-buffer offset
// alignment (the mask trick relies on the alignment being a power of two,
// which Vulkan guarantees for this limit).
size_t RenderContext::pad_uniform_buffer_size(size_t originalSize) const {
	const size_t alignment = m_gpuProperties.limits.minUniformBufferOffsetAlignment;
	return alignment == 0
			? originalSize
			: (originalSize + alignment - 1) & ~(alignment - 1);
}
VkInstance RenderContext::get_instance() {
	return m_instance;
}
VkPhysicalDevice RenderContext::get_physical_device() {
	return m_physicalDevice;
}
VkDevice RenderContext::get_device() {
	return m_device;
}
VmaAllocator RenderContext::get_allocator() {
	return m_allocator;
}
// Falls back to the graphics queue when no dedicated transfer queue exists
// (see vulkan_init).
Queue& RenderContext::get_transfer_queue() {
	return m_transferQueue ? *m_transferQueue : *m_graphicsQueue;
}
uint32_t RenderContext::get_transfer_queue_family() const {
	return m_transferQueueFamily;
}
Queue& RenderContext::get_graphics_queue() {
	return *m_graphicsQueue;
}
uint32_t RenderContext::get_graphics_queue_family() const {
	return m_graphicsQueueFamily;
}
// Falls back to the graphics queue when present and graphics share a queue.
Queue& RenderContext::get_present_queue() {
	return m_presentQueue ? *m_presentQueue : *m_graphicsQueue;
}
uint32_t RenderContext::get_present_queue_family() const {
	return m_presentQueueFamily;
}
tracy::VkCtx* RenderContext::get_tracy_context() const {
	return m_graphicsQueueContext;
}
// Semaphores are pooled and must be returned via release_semaphore.
VkSemaphore RenderContext::acquire_semaphore() {
	return m_semaphorePool->acquire_semaphore();
}
void RenderContext::release_semaphore(VkSemaphore sem) {
	m_semaphorePool->release_semaphore(sem);
}
VkFormat RenderContext::get_swapchain_image_format() const {
	return m_swapchainImageFormat;
}
// Extents are derived from the window, not cached from swapchain creation.
VkExtent2D RenderContext::get_swapchain_extent() const {
	return {static_cast<uint32_t>(m_window.get_width()),
			static_cast<uint32_t>(m_window.get_height())};
}
VkExtent3D RenderContext::get_swapchain_extent_3d() const {
	return {static_cast<uint32_t>(m_window.get_width()),
			static_cast<uint32_t>(m_window.get_height()), 1};
}
CommandBuffer& RenderContext::get_main_command_buffer() const {
	return *m_frames[get_frame_index()].mainCommandBuffer;
}
FrameGraph& RenderContext::get_frame_graph() {
	return *m_frames[get_frame_index()].frameGraph;
}
UploadContext& RenderContext::get_upload_context() {
	return *m_uploadContext;
}
TextureRegistry& RenderContext::get_texture_registry() {
	return *m_textureRegistry;
}
// Monotonic frame counter since startup.
size_t RenderContext::get_frame_number() const {
	return m_frameCounter;
}
// Frame-in-flight slot; the 2-frame case uses a mask instead of a modulo.
size_t RenderContext::get_frame_index() const {
	if constexpr (FRAMES_IN_FLIGHT == 2) {
		return (m_frameCounter & 1);
	}
	else {
		return m_frameCounter % FRAMES_IN_FLIGHT;
	}
}
size_t RenderContext::get_last_frame_index() const {
	if constexpr (FRAMES_IN_FLIGHT == 2) {
		return (m_frameCounter - 1) & 1;
	}
	else {
		return (m_frameCounter - 1) % FRAMES_IN_FLIGHT;
	}
}
// These index by the image acquired for the CURRENT frame slot.
Image& RenderContext::get_swapchain_image() const {
	return *m_swapchainImages[get_swapchain_image_index()];
}
ImageView& RenderContext::get_swapchain_image_view() const {
	return *m_swapchainImageViews[get_swapchain_image_index()];
}
ImageView& RenderContext::get_swapchain_image_view(uint32_t index) const {
	return *m_swapchainImageViews[index];
}
uint32_t RenderContext::get_swapchain_image_index() const {
	return m_frames[get_frame_index()].imageIndex;
}
uint32_t RenderContext::get_swapchain_image_count() const {
	return static_cast<uint32_t>(m_swapchainImages.size());
}
IntrusivePtr<Scheduler::Mutex> RenderContext::get_swapchain_mutex() const {
	return m_swapchainMutex;
}
RenderContext::ResizeEvent& RenderContext::swapchain_resize_event() {
	return m_resizeEvent;
}
// PRIVATE METHODS
// INIT
// Creates instance, surface, physical/logical device, and resolves queues.
// Loader entry points are pulled through volk at each stage.
void RenderContext::vulkan_init() {
	ZoneScopedN("Vulkan");
	VK_CHECK(volkInitialize());
	TracyCZoneN(ctx1, "Instance", true);
	vkb::InstanceBuilder instanceBuilder;
	auto vkbInstance = instanceBuilder
			.set_app_name("My Vulkan Engine")
			.request_validation_layers(true)
			.require_api_version(1, 1, 0)
			.use_default_debug_messenger()
			.build()
			.value();
	m_instance = vkbInstance.instance;
	m_debugMessenger = vkbInstance.debug_messenger;
	TracyCZoneN(ctx2, "Volk", true);
	volkLoadInstance(m_instance);
	TracyCZoneEnd(ctx2);
	TracyCZoneEnd(ctx1);
	TracyCZoneN(ctx3, "Surface", true);
	VK_CHECK(glfwCreateWindowSurface(m_instance, m_window.get_handle(), nullptr, &m_surface));
	TracyCZoneEnd(ctx3);
	// Core features this renderer relies on (GPU-written buffers, stats
	// queries, multi-draw indirect).
	VkPhysicalDeviceFeatures features{};
	features.fragmentStoresAndAtomics = true;
	features.pipelineStatisticsQuery = true;
	features.multiDrawIndirect = true;
	TracyCZoneN(ctx4, "PhysicalDevice", true);
	vkb::PhysicalDeviceSelector selector{vkbInstance};
	auto vkbPhysicalDevice = selector
			.set_minimum_version(1, 1)
			.set_surface(m_surface)
			.set_required_features(features)
			.add_required_extension("VK_EXT_descriptor_indexing")
			//.add_required_extension("VK_NV_device_diagnostic_checkpoints")
			.select()
			.value();
	TracyCZoneEnd(ctx4);
	TracyCZoneN(ctx5, "Device", true);
	// Descriptor indexing features for bindless textures (see TextureRegistry).
	VkPhysicalDeviceDescriptorIndexingFeatures descriptorIndexing{};
	descriptorIndexing.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES;
	descriptorIndexing.shaderSampledImageArrayNonUniformIndexing = VK_TRUE;
	descriptorIndexing.runtimeDescriptorArray = VK_TRUE;
	descriptorIndexing.descriptorBindingVariableDescriptorCount = VK_TRUE;
	descriptorIndexing.descriptorBindingPartiallyBound = VK_TRUE;
	descriptorIndexing.descriptorBindingSampledImageUpdateAfterBind = VK_TRUE;
	vkb::DeviceBuilder deviceBuilder{vkbPhysicalDevice};
	auto vkbDevice = deviceBuilder
			.add_pNext(&descriptorIndexing)
			.build()
			.value();
	m_device = vkbDevice.device;
	m_physicalDevice = vkbPhysicalDevice.physical_device;
	m_gpuProperties = vkbDevice.physical_device.properties;
	TracyCZoneN(ctx6, "Volk", true);
	volkLoadDevice(m_device);
	TracyCZoneEnd(ctx6);
	TracyCZoneEnd(ctx5);
	// Queue resolution: m_presentQueue/m_transferQueue stay unset when they
	// coincide with graphics; the getters fall back accordingly.
	auto graphicsQueue = vkbDevice.get_queue(vkb::QueueType::graphics).value();
	auto presentQueue = vkbDevice.get_queue(vkb::QueueType::present).value();
	m_graphicsQueue.create(graphicsQueue);
	m_graphicsQueueFamily = vkbDevice.get_queue_index(vkb::QueueType::graphics).value();
	m_presentQueueFamily = vkbDevice.get_queue_index(vkb::QueueType::present).value();
	if (presentQueue != graphicsQueue) {
		m_presentQueue.create(presentQueue);
	}
	if (vkbPhysicalDevice.has_dedicated_transfer_queue()) {
		m_transferQueue.create(vkbDevice.get_dedicated_queue(vkb::QueueType::transfer).value());
		m_transferQueueFamily = vkbDevice.get_dedicated_queue_index(vkb::QueueType::transfer)
				.value();
	}
	else {
		m_transferQueueFamily = m_graphicsQueueFamily;
	}
	//g_vulkanProfiler.create(m_device, m_gpuProperties.limits.timestampPeriod);
}
// Creates the VMA allocator. The function-pointer table is required because
// entry points are loaded dynamically through volk, so VMA cannot link them
// statically itself.
void RenderContext::allocator_init() {
	ZoneScopedN("Allocator");
	VmaVulkanFunctions vkFns{};
	vkFns.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
	vkFns.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
	vkFns.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
	vkFns.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
	vkFns.vkAllocateMemory = vkAllocateMemory;
	vkFns.vkFreeMemory = vkFreeMemory;
	vkFns.vkMapMemory = vkMapMemory;
	vkFns.vkUnmapMemory = vkUnmapMemory;
	vkFns.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges;
	vkFns.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges;
	vkFns.vkBindBufferMemory = vkBindBufferMemory;
	vkFns.vkBindImageMemory = vkBindImageMemory;
	vkFns.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
	vkFns.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
	vkFns.vkCreateBuffer = vkCreateBuffer;
	vkFns.vkDestroyBuffer = vkDestroyBuffer;
	vkFns.vkCreateImage = vkCreateImage;
	vkFns.vkDestroyImage = vkDestroyImage;
	vkFns.vkCmdCopyBuffer = vkCmdCopyBuffer;
	VmaAllocatorCreateInfo allocatorInfo{};
	allocatorInfo.physicalDevice = m_physicalDevice;
	allocatorInfo.device = m_device;
	allocatorInfo.instance = m_instance;
	allocatorInfo.pVulkanFunctions = &vkFns;
	VK_CHECK(vmaCreateAllocator(&allocatorInfo, &m_allocator));
}
void RenderContext::swapchain_init() {
	ZoneScopedN("Swapchain");
	// Standard two-call enumeration of the supported surface formats.
	uint32_t surfaceFormatCount{};
	VK_CHECK(vkGetPhysicalDeviceSurfaceFormatsKHR(m_physicalDevice, m_surface, &surfaceFormatCount,
			nullptr));
	std::vector<VkSurfaceFormatKHR> surfaceFormats(surfaceFormatCount);
	VK_CHECK(vkGetPhysicalDeviceSurfaceFormatsKHR(m_physicalDevice, m_surface, &surfaceFormatCount,
			surfaceFormats.data()));
	// Prefer B8G8R8A8_UNORM when available; otherwise fall back to whatever
	// format the driver lists first.
	VkSurfaceFormatKHR* preferredFormat = &surfaceFormats[0];
	for (size_t i = 1; i < surfaceFormats.size(); ++i) {
		if (surfaceFormats[i].format == VK_FORMAT_B8G8R8A8_UNORM) {
			preferredFormat = &surfaceFormats[i];
			break;
		}
	}
	m_preferredSurfaceFormat = *preferredFormat;
	vkb::SwapchainBuilder builder{m_physicalDevice, m_device, m_surface};
	auto vkbSwapchain = builder
			.set_desired_format(m_preferredSurfaceFormat)
			//.use_default_format_selection()
			.set_desired_present_mode(VK_PRESENT_MODE_FIFO_KHR) // change to mailbox later
			//.set_desired_present_mode(VK_PRESENT_MODE_MAILBOX_KHR)
			.set_desired_extent(m_window.get_width(), m_window.get_height())
			.build()
			.value();
	m_swapchain = vkbSwapchain.swapchain;
	m_swapchainImageFormat = vkbSwapchain.image_format;
	// Wrap the swapchain images/views in engine objects (presumably
	// non-owning — the swapchain owns the VkImages; verify Image::create's
	// flag semantics).
	auto images = vkbSwapchain.get_images().value();
	for (auto img : images) {
		m_swapchainImages.emplace_back(Image::create(img, m_swapchainImageFormat,
				get_swapchain_extent_3d(), {}));
	}
	auto imageViews = vkbSwapchain.get_image_views().value();
	for (size_t i = 0; i < imageViews.size(); ++i) {
		m_swapchainImageViews.emplace_back(ImageView::create(*m_swapchainImages[i], imageViews[i],
				{}));
	}
	// Rebuild the swapchain whenever the window reports a resize.
	m_window.resize_event().connect([&](int width, int height) {
		swap_chain_recreate(width, height);
	});
}
void RenderContext::frame_data_init() {
	ZoneScopedN("Frame Data");
	VkSemaphoreCreateInfo semaphoreCreateInfo{};
	semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
	// Per frame-in-flight resources: command buffers, descriptor pool, frame
	// graph, and the sync primitives consumed by frame_begin/frame_end.
	for (size_t i = 0; i < FRAMES_IN_FLIGHT; ++i) {
		auto& frameData = m_frames[i];
		frameData.mainCommandBuffer = CommandBuffer::create(m_device, m_graphicsQueueFamily);
		frameData.uploadCommandBuffer = CommandBuffer::create(m_device, m_transferQueueFamily);
		frameData.descriptorAllocator.create(m_device);
		frameData.frameGraph.create();
		// Created signaled so the first frame's fence wait returns immediately.
		frameData.renderFence = Fence::create(m_device, VK_FENCE_CREATE_SIGNALED_BIT);
		VK_CHECK(vkCreateSemaphore(m_device, &semaphoreCreateInfo, nullptr,
				&frameData.presentSemaphore));
		VK_CHECK(vkCreateSemaphore(m_device, &semaphoreCreateInfo, nullptr,
				&frameData.renderSemaphore));
		VK_CHECK(vkCreateSemaphore(m_device, &semaphoreCreateInfo, nullptr,
				&frameData.transferSemaphore));
	}
	// Dedicated command buffer handed to Tracy for GPU timestamp calibration.
	m_profileCommandPool = CommandPool::create(m_device, m_graphicsQueueFamily,
			VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
	VkCommandBufferAllocateInfo allocInfo{
		.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
		.commandPool = *m_profileCommandPool,
		.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
		.commandBufferCount = 1
	};
	VkCommandBuffer profileCmd;
	VK_CHECK(vkAllocateCommandBuffers(m_device, &allocInfo, &profileCmd));
	m_graphicsQueueContext = TracyVkContext(m_physicalDevice, m_device, *m_graphicsQueue,
			profileCmd);
}
void RenderContext::descriptors_init() {
	ZoneScopedN("Descriptors");
	// Caches dedupe identical pipeline/descriptor-set layouts across the app.
	m_pipelineLayoutCache.create(m_device);
	m_descriptorLayoutCache.create(m_device);
	m_globalDescriptorAllocator.create(m_device);
	m_globalUpdateAfterBindDescriptorAllocator.create(m_device,
			VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT);
	// Queue teardown (reverse creation order) for shutdown.
	m_mainDeletionQueue.push_back([this] {
		m_globalUpdateAfterBindDescriptorAllocator.destroy();
		m_globalDescriptorAllocator.destroy();
		m_descriptorLayoutCache.destroy();
		m_pipelineLayoutCache.destroy();
	});
}
/// Rebuilds the swapchain (and its image/view wrappers) after a resize.
/// Safe to call from the resize event; no-ops for a zero-sized framebuffer
/// (minimized window), which cannot back a swapchain.
void RenderContext::swap_chain_recreate(int width, int height) {
	if (width == 0 || height == 0) {
		return;
	}
	ScopedLock lock(*m_swapchainMutex);
	vkb::SwapchainBuilder builder{m_physicalDevice, m_device, m_surface};
	auto vkbSwapchain = builder
			.set_desired_format(m_preferredSurfaceFormat)
			.set_old_swapchain(m_swapchain)
			.set_desired_present_mode(VK_PRESENT_MODE_FIFO_KHR) // change to mailbox later
			.set_desired_extent(width, height)
			.build()
			.value();
	// Retire the old per-image objects; delete_late defers destruction until
	// in-flight frames can no longer reference them.
	for (auto& imageView : m_swapchainImageViews) {
		imageView->delete_late();
	}
	for (auto& image : m_swapchainImages) {
		image->delete_late();
	}
	m_swapchainImages.clear();
	m_swapchainImageViews.clear();
	// FIX: the previous version also captured m_swapchainImageViews here —
	// *after* the clear() above — so the lambda only ever held an empty,
	// unused vector. Only the old swapchain handle must outlive this scope.
	queue_delete_late([device=m_device, swapchain=m_swapchain] {
		vkDestroySwapchainKHR(device, swapchain, nullptr);
	});
	m_swapchain = vkbSwapchain.swapchain;
	// Wrap the new swapchain images/views, as in swapchain_init().
	auto images = vkbSwapchain.get_images().value();
	for (auto img : images) {
		m_swapchainImages.emplace_back(Image::create(img, m_swapchainImageFormat,
				get_swapchain_extent_3d(), {}));
	}
	auto imageViews = vkbSwapchain.get_image_views().value();
	for (size_t i = 0; i < imageViews.size(); ++i) {
		m_swapchainImageViews.emplace_back(ImageView::create(*m_swapchainImages[i], imageViews[i],
				{}));
	}
	m_validSwapchain = true;
	m_resizeEvent.fire(width, height);
}
|
whupdup/frame
|
real/graphics/render_context.cpp
|
C++
|
gpl-3.0
| 20,028
|
#pragma once
#include <core/events.hpp>
#include <core/local.hpp>
#include <graphics/command_buffer.hpp>
#include <graphics/descriptors.hpp>
#include <graphics/frame_graph.hpp>
#include <graphics/graphics_fwd.hpp>
#include <graphics/image_view.hpp>
#include <graphics/pipeline_layout.hpp>
#include <graphics/queue.hpp>
#include <graphics/semaphore_pool.hpp>
#include <graphics/texture_registry.hpp>
#include <graphics/upload_context.hpp>
#include <graphics/fence.hpp>
#include <functional>
#include <vector>
#include <concurrentqueue.h>
#include <vk_mem_alloc.h>
namespace tracy { class VkCtx; }
namespace ZN { class Window; }
namespace ZN::GFX {
/// FIFO queue of deferred cleanup callables (GPU resource destruction, etc.).
/// Deletors are stored as std::function, so keep captures small — the
/// static_assert guards against accidentally capturing large state.
class DeletionQueue {
public:
	/// Queues a callable to be invoked when flush() runs.
	template <typename Deletor>
	void push_back(Deletor&& deletor) {
		static_assert(sizeof(Deletor) <= 200, "Warning: overallocating deletors");
		// Forward rather than unconditionally move: Deletor&& is a forwarding
		// reference, so an lvalue argument must be copied, not moved-from.
		m_deletors.push_back(std::forward<Deletor>(deletor));
	}
	/// True when no deletors are pending.
	bool is_empty() const;
	/// Runs and discards all queued deletors.
	void flush();
private:
	std::vector<std::function<void()>> m_deletors;
};
// Owns the Vulkan instance/device/swapchain and all per-frame rendering
// state; created once per Window and accessed via g_renderContext.
class RenderContext final {
public:
	using ResizeEvent = Event::Dispatcher<int, int>;
	explicit RenderContext(Window&);
	~RenderContext();
	NULL_COPY_AND_ASSIGN(RenderContext);
	// Initialization that must run after construction (see .cpp).
	void late_init();
	// Per-frame bracket: acquire image / submit and present.
	void frame_begin();
	void frame_end();
	// Cached layout creation — identical create infos return the same handle.
	[[nodiscard]] VkDescriptorSetLayout descriptor_set_layout_create(
			const VkDescriptorSetLayoutCreateInfo&);
	[[nodiscard]] VkPipelineLayout pipeline_layout_create(
			const VkPipelineLayoutCreateInfo&);
	DescriptorBuilder global_descriptor_set_begin();
	DescriptorBuilder dynamic_descriptor_set_begin();
	DescriptorBuilder global_update_after_bind_descriptor_set_begin();
	// Producer/consumer handoff of images whose uploads have completed.
	void mark_image_ready(ImageView&);
	bool get_ready_image(Memory::IntrusivePtr<ImageView>&);
	// Defers a deletor to the current frame's deletion queue.
	// NOTE(review): std::move on a forwarding reference moves from lvalue
	// arguments too; consider std::forward<Deletor>(deletor) here.
	template <typename Deletor>
	void queue_delete(Deletor&& deletor) {
		m_frames[get_frame_index()].deletionQueue.push_back(std::move(deletor));
	}
	// Same, but queued on the previous frame's index so it runs one frame
	// later than queue_delete would.
	template <typename Deletor>
	void queue_delete_late(Deletor&& deletor) {
		m_frames[get_last_frame_index()].deletionQueue.push_back(std::move(deletor));
	}
	void swap_chain_recreate(int width, int height);
	// Rounds a size up to the device's uniform-buffer offset alignment.
	size_t pad_uniform_buffer_size(size_t originalSize) const;
	VkInstance get_instance();
	VkPhysicalDevice get_physical_device();
	VkDevice get_device();
	VmaAllocator get_allocator();
	GFX::Queue& get_transfer_queue();
	uint32_t get_transfer_queue_family() const;
	GFX::Queue& get_graphics_queue();
	uint32_t get_graphics_queue_family() const;
	GFX::Queue& get_present_queue();
	uint32_t get_present_queue_family() const;
	tracy::VkCtx* get_tracy_context() const;
	VkCommandBuffer get_graphics_upload_command_buffer();
	// Semaphore pool accessors — acquired semaphores must be released back.
	VkSemaphore acquire_semaphore();
	void release_semaphore(VkSemaphore);
	VkFormat get_swapchain_image_format() const;
	VkExtent2D get_swapchain_extent() const;
	VkExtent3D get_swapchain_extent_3d() const;
	CommandBuffer& get_main_command_buffer() const;
	FrameGraph& get_frame_graph();
	UploadContext& get_upload_context();
	TextureRegistry& get_texture_registry();
	size_t get_frame_number() const;
	size_t get_frame_index() const;
	size_t get_last_frame_index() const;
	Image& get_swapchain_image() const;
	ImageView& get_swapchain_image_view() const;
	ImageView& get_swapchain_image_view(uint32_t index) const;
	uint32_t get_swapchain_image_index() const;
	uint32_t get_swapchain_image_count() const;
	Memory::IntrusivePtr<Scheduler::Mutex> get_swapchain_mutex() const;
	ResizeEvent& swapchain_resize_event();
private:
	// State duplicated per frame in flight (see frame_data_init in the .cpp).
	struct FrameData {
		Memory::IntrusivePtr<Fence> renderFence;
		VkSemaphore presentSemaphore;
		VkSemaphore renderSemaphore;
		VkSemaphore transferSemaphore;
		Memory::IntrusivePtr<CommandBuffer> mainCommandBuffer;
		Memory::IntrusivePtr<CommandBuffer> uploadCommandBuffer;
		Local<DescriptorAllocator> descriptorAllocator;
		Local<FrameGraph> frameGraph;
		uint32_t imageIndex;
		DeletionQueue deletionQueue;
	};
	VkInstance m_instance;
	VkDebugUtilsMessengerEXT m_debugMessenger;
	VkPhysicalDevice m_physicalDevice;
	VkDevice m_device;
	VkSurfaceKHR m_surface;
	Local<GFX::Queue> m_graphicsQueue;
	Local<GFX::Queue> m_presentQueue;
	Local<GFX::Queue> m_transferQueue;
	uint32_t m_graphicsQueueFamily;
	uint32_t m_transferQueueFamily;
	uint32_t m_presentQueueFamily;
	tracy::VkCtx* m_graphicsQueueContext;
	VkSwapchainKHR m_swapchain;
	VkSurfaceFormatKHR m_preferredSurfaceFormat;
	VkFormat m_swapchainImageFormat;
	std::vector<Memory::IntrusivePtr<Image>> m_swapchainImages;
	std::vector<Memory::IntrusivePtr<ImageView>> m_swapchainImageViews;
	bool m_validSwapchain;
	VmaAllocator m_allocator;
	Local<SemaphorePool> m_semaphorePool;
	Local<DescriptorLayoutCache> m_descriptorLayoutCache;
	Local<DescriptorAllocator> m_globalDescriptorAllocator;
	Local<DescriptorAllocator> m_globalUpdateAfterBindDescriptorAllocator;
	Local<PipelineLayoutCache> m_pipelineLayoutCache;
	Memory::IntrusivePtr<CommandPool> m_profileCommandPool;
	Local<UploadContext> m_uploadContext;
	Local<TextureRegistry> m_textureRegistry;
	// Thread-safe queue feeding get_ready_image().
	moodycamel::ConcurrentQueue<Memory::IntrusivePtr<ImageView>> m_readyImages;
	FrameData m_frames[FRAMES_IN_FLIGHT];
	size_t m_frameCounter;
	Window& m_window;
	ResizeEvent m_resizeEvent;
	DeletionQueue m_mainDeletionQueue;
	VkPhysicalDeviceProperties m_gpuProperties;
	Memory::IntrusivePtr<Scheduler::Mutex> m_swapchainMutex;
	// Startup stages, in call order.
	void vulkan_init();
	void allocator_init();
	void swapchain_init();
	void frame_data_init();
	void descriptors_init();
};
inline Local<RenderContext> g_renderContext;
}
|
whupdup/frame
|
real/graphics/render_context.hpp
|
C++
|
gpl-3.0
| 5,553
|
#include "render_pass.hpp"
#include <core/hash_builder.hpp>
#include <core/logging.hpp>
#include <core/memory.hpp>
#include <core/scoped_lock.hpp>
#include <graphics/render_context.hpp>
#include <Tracy.hpp>
#include <cassert>
#include <vector>
#include <unordered_map>
using namespace ZN;
using namespace ZN::GFX;
namespace {
// Hasher for the file-global render pass cache below.
struct RenderPassCreateInfoHash {
	size_t operator()(const RenderPass::CreateInfo&) const;
};
// Cache entry: the live pass plus owned storage for the subpass array that
// the cached CreateInfo key's pSubpasses points into.
struct RenderPassContainer {
	RenderPass* renderPass;
	Memory::UniquePtr<RenderPass::SubpassInfo[]> subpassInfo;
};
// Accumulates VkSubpassDependency entries; `lookup` maps a packed
// (srcSubpass, dstSubpass) key to an index so duplicate edges are merged.
struct DependencyList {
	std::unordered_map<uint64_t, size_t> lookup;
	std::vector<VkSubpassDependency> dependencies;
};
}
// Live RenderPass count, plotted in Tracy for leak tracking.
static int64_t g_counter = 0;
// Global de-duplication cache keyed by CreateInfo; guarded by g_mutex.
static std::unordered_map<RenderPass::CreateInfo, RenderPassContainer, RenderPassCreateInfoHash>
		g_renderPassCache = {};
static IntrusivePtr<Scheduler::Mutex> g_mutex = Scheduler::Mutex::create();
static VkRenderPass render_pass_create(const RenderPass::CreateInfo& createInfo);
static bool has_depth_attachment(const RenderPass::CreateInfo& createInfo);
// Returns a cached RenderPass matching createInfo, creating (and caching) it
// on a miss. Returns an empty pointer if Vulkan creation fails.
IntrusivePtr<RenderPass> RenderPass::create(const RenderPass::CreateInfo& createInfo) {
	ScopedLock lock(*g_mutex);
	if (auto it = g_renderPassCache.find(createInfo); it != g_renderPassCache.end()) {
		return it->second.renderPass->reference_from_this();
	}
	else {
		// The lock is dropped while vkCreateRenderPass runs so other threads
		// are not serialized behind driver work.
		// NOTE(review): two threads can miss concurrently and both create the
		// pass; the second emplace then fails silently and that thread keeps
		// an uncached duplicate VkRenderPass — confirm this is acceptable.
		lock.unlock();
		ZoneScopedN("Create RenderPass");
		auto renderPass = render_pass_create(createInfo);
		if (renderPass == VK_NULL_HANDLE) {
			return {};
		}
		// The caller's pSubpasses array may be transient; deep-copy it so the
		// cached key stays valid (SubpassInfo is trivially copyable).
		auto ownedCreateInfo = createInfo;
		auto ownedSubpassInfo = std::make_unique<SubpassInfo[]>(createInfo.subpassCount);
		ownedCreateInfo.pSubpasses = ownedSubpassInfo.get();
		memcpy(ownedSubpassInfo.get(), createInfo.pSubpasses,
				static_cast<size_t>(createInfo.subpassCount) * sizeof(SubpassInfo));
		Memory::IntrusivePtr result(new RenderPass(renderPass));
		lock.lock();
		g_renderPassCache.emplace(std::make_pair(std::move(ownedCreateInfo),
				RenderPassContainer{result.get(), std::move(ownedSubpassInfo)}));
		lock.unlock();
		return result;
	}
}
// Takes ownership of an already-created VkRenderPass handle.
RenderPass::RenderPass(VkRenderPass renderPass)
		: m_renderPass(renderPass) {
	// Track live instance count for the Tracy "RenderPasses" plot.
	++g_counter;
	TracyPlot("RenderPasses", g_counter);
}
RenderPass::~RenderPass() {
	--g_counter;
	TracyPlot("RenderPasses", g_counter);
	// Destruction is deferred through the frame deletion queue so in-flight
	// command buffers can finish using the pass.
	if (m_renderPass != VK_NULL_HANDLE) {
		g_renderContext->queue_delete([renderPass=this->m_renderPass] {
			vkDestroyRenderPass(g_renderContext->get_device(), renderPass, nullptr);
		});
	}
	// Remove our cache entry. The cache stores raw pointers, so this linear
	// search by identity is the only way to find our key.
	ScopedLock lock(*g_mutex);
	for (auto it = g_renderPassCache.begin(), end = g_renderPassCache.end(); it != end; ++it) {
		if (it->second.renderPass == this) {
			g_renderPassCache.erase(it);
			return;
		}
	}
}
// Implicit conversion to the underlying Vulkan handle.
RenderPass::operator VkRenderPass() const {
	return m_renderPass;
}
// Explicit accessor for the underlying Vulkan handle.
VkRenderPass RenderPass::get_render_pass() const {
	return m_renderPass;
}
// Resolves the load op for color attachment `index`: clear wins over load;
// an attachment in neither mask is DONT_CARE.
static VkAttachmentLoadOp get_load_op(RenderPass::AttachmentBitMask_T clearMask,
		RenderPass::AttachmentBitMask_T loadMask, uint8_t index) {
	const auto bit = 1u << index;
	if (clearMask & bit) {
		return VK_ATTACHMENT_LOAD_OP_CLEAR;
	}
	return (loadMask & bit) ? VK_ATTACHMENT_LOAD_OP_LOAD : VK_ATTACHMENT_LOAD_OP_DONT_CARE;
}
// Initial layout for color attachment `index`: attachments that are not
// loaded start UNDEFINED (previous contents discarded); loaded swapchain
// images arrive in PRESENT_SRC from the previous frame.
static VkImageLayout get_color_initial_layout(const RenderPass::CreateInfo& createInfo,
		RenderPass::AttachmentCount_T index) {
	const bool loaded = (createInfo.loadAttachmentMask >> index) & 1;
	if (!loaded) {
		return VK_IMAGE_LAYOUT_UNDEFINED;
	}
	const bool isSwapchain = (createInfo.swapchainAttachmentMask >> index) & 1;
	return isSwapchain ? VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
			: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}
// Final layout for color attachment `index`: swapchain images must end in
// PRESENT_SRC so they can be handed to the presentation engine.
static VkImageLayout get_color_final_layout(const RenderPass::CreateInfo& createInfo,
		RenderPass::AttachmentCount_T index) {
	if ((createInfo.swapchainAttachmentMask >> index) & 1) {
		return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
	}
	return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}
// Depth contents are only defined on entry when the pass loads them.
static VkImageLayout get_depth_initial_layout(const RenderPass::CreateInfo& createInfo) {
	if (createInfo.createFlags & RenderPass::CREATE_FLAG_LOAD_DEPTH_STENCIL) {
		return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
	}
	return VK_IMAGE_LAYOUT_UNDEFINED;
}
// Fills attachDescs with one VkAttachmentDescription per color attachment,
// plus a trailing depth-stencil description when hasDepthAttach is set.
static void build_attachment_descriptions(const RenderPass::CreateInfo& createInfo,
		VkAttachmentDescription* attachDescs, bool hasDepthAttach) {
	// An attachment cannot be both cleared and loaded.
	assert(!(createInfo.clearAttachmentMask & createInfo.loadAttachmentMask));
	for (RenderPass::AttachmentCount_T i = 0; i < createInfo.colorAttachmentCount; ++i) {
		auto& ai = createInfo.colorAttachments[i];
		attachDescs[i] = {
			.format = ai.format,
			.samples = ai.samples,
			.loadOp = get_load_op(createInfo.clearAttachmentMask, createInfo.loadAttachmentMask,
					i),
			.storeOp = ((createInfo.storeAttachmentMask >> i) & 1) ? VK_ATTACHMENT_STORE_OP_STORE
					: VK_ATTACHMENT_STORE_OP_DONT_CARE,
			// Color attachments never carry stencil data.
			.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
			.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
			.initialLayout = get_color_initial_layout(createInfo, i),
			.finalLayout = get_color_final_layout(createInfo, i)
		};
	}
	// Depth-stencil goes in the slot after the last color attachment; depth
	// and stencil share the same load/store policy here.
	if (hasDepthAttach) {
		VkAttachmentLoadOp loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
		VkAttachmentStoreOp storeOp = (createInfo.createFlags
				& RenderPass::CREATE_FLAG_STORE_DEPTH_STENCIL)
				? VK_ATTACHMENT_STORE_OP_STORE : VK_ATTACHMENT_STORE_OP_DONT_CARE;
		if (createInfo.createFlags & RenderPass::CREATE_FLAG_CLEAR_DEPTH_STENCIL) {
			loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
		}
		else if (createInfo.createFlags & RenderPass::CREATE_FLAG_LOAD_DEPTH_STENCIL) {
			loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
		}
		attachDescs[createInfo.colorAttachmentCount] = {
			.format = createInfo.depthAttachment.format,
			.samples = createInfo.depthAttachment.samples,
			.loadOp = loadOp,
			.storeOp = storeOp,
			.stencilLoadOp = loadOp,
			.stencilStoreOp = storeOp,
			.initialLayout = get_depth_initial_layout(createInfo),
			.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
		};
	}
}
// Builds a VkAttachmentReference array (all entries sharing `layout`) for
// the given attachment indices and parks it in attachRefLists so it outlives
// this call.
// NOTE: returning back().data() stays valid even though attachRefLists may
// reallocate later — reallocation moves the inner vectors, and moving a
// std::vector transfers its heap buffer without changing the data pointer.
static const VkAttachmentReference* create_attach_ref_list(
		const RenderPass::AttachmentIndex_T* indices, RenderPass::AttachmentCount_T count,
		VkImageLayout layout, std::vector<std::vector<VkAttachmentReference>>& attachRefLists) {
	std::vector<VkAttachmentReference> attachRefs(count);
	for (RenderPass::AttachmentCount_T i = 0; i < count; ++i) {
		attachRefs[i].attachment = indices[i];
		attachRefs[i].layout = layout;
	}
	attachRefLists.emplace_back(std::move(attachRefs));
	return attachRefLists.back().data();
}
// Translates each SubpassInfo into a VkSubpassDescription. The `subpasses`
// array is assumed value-initialized by the caller, so untouched fields —
// including pipelineBindPoint, whose zero value is
// VK_PIPELINE_BIND_POINT_GRAPHICS — stay zero.
static void build_subpass_descriptions(const RenderPass::CreateInfo& createInfo,
		VkSubpassDescription* subpasses,
		std::vector<std::vector<VkAttachmentReference>>& attachRefLists) {
	// The depth attachment always occupies the slot after the color ones.
	RenderPass::AttachmentIndex_T depthIndex = createInfo.colorAttachmentCount;
	for (uint8_t i = 0; i < createInfo.subpassCount; ++i) {
		auto& sp = createInfo.pSubpasses[i];
		switch (sp.depthStencilUsage) {
		case RenderPass::DepthStencilUsage::READ:
			subpasses[i].pDepthStencilAttachment = create_attach_ref_list(&depthIndex, 1,
					VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, attachRefLists);
			break;
		case RenderPass::DepthStencilUsage::WRITE:
		case RenderPass::DepthStencilUsage::READ_WRITE:
			subpasses[i].pDepthStencilAttachment = create_attach_ref_list(&depthIndex, 1,
					VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, attachRefLists);
			break;
		default:
			// DepthStencilUsage::NONE — leave pDepthStencilAttachment null.
			break;
		}
		subpasses[i].inputAttachmentCount = static_cast<uint32_t>(sp.inputAttachmentCount);
		if (sp.inputAttachmentCount > 0) {
			subpasses[i].pInputAttachments = create_attach_ref_list(sp.inputAttachments,
					sp.inputAttachmentCount, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
					attachRefLists);
		}
		subpasses[i].colorAttachmentCount = static_cast<uint32_t>(sp.colorAttachmentCount);
		if (sp.colorAttachmentCount > 0) {
			subpasses[i].pColorAttachments = create_attach_ref_list(sp.colorAttachments,
					sp.colorAttachmentCount, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
					attachRefLists);
		}
		if (sp.resolveAttachmentCount > 0) {
			subpasses[i].pResolveAttachments = create_attach_ref_list(sp.resolveAttachments,
					sp.resolveAttachmentCount, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
					attachRefLists);
		}
	}
}
// True when `attachIndex` appears in the subpass's color attachment list.
static bool subpass_has_color_attachment(const RenderPass::SubpassInfo& subpass,
		RenderPass::AttachmentIndex_T attachIndex) {
	const auto* end = subpass.colorAttachments + subpass.colorAttachmentCount;
	for (const auto* p = subpass.colorAttachments; p != end; ++p) {
		if (*p == attachIndex) {
			return true;
		}
	}
	return false;
}
// True when `attachIndex` appears in the subpass's resolve attachment list.
static bool subpass_has_resolve_attachment(const RenderPass::SubpassInfo& subpass,
		RenderPass::AttachmentIndex_T attachIndex) {
	const auto* end = subpass.resolveAttachments + subpass.resolveAttachmentCount;
	for (const auto* p = subpass.resolveAttachments; p != end; ++p) {
		if (*p == attachIndex) {
			return true;
		}
	}
	return false;
}
// True when `attachIndex` appears in the subpass's input attachment list.
static bool subpass_has_input_attachment(const RenderPass::SubpassInfo& subpass,
		RenderPass::AttachmentIndex_T attachIndex) {
	const auto* end = subpass.inputAttachments + subpass.inputAttachmentCount;
	for (const auto* p = subpass.inputAttachments; p != end; ++p) {
		if (*p == attachIndex) {
			return true;
		}
	}
	return false;
}
// Records a subpass dependency edge. Edges are keyed by the (src, dst)
// subpass pair so repeated contributions merge their stage/access masks
// instead of producing duplicate VkSubpassDependency entries.
static void build_dependency(DependencyList& depList, uint8_t srcSubpass, uint8_t dstSubpass,
		VkPipelineStageFlags srcStageMask, VkAccessFlags srcAccessMask,
		VkPipelineStageFlags dstStageMask, VkAccessFlags dstAccessMask) {
	const auto key = static_cast<uint64_t>(srcSubpass) | (static_cast<uint64_t>(dstSubpass) << 8);
	const auto existing = depList.lookup.find(key);
	if (existing == depList.lookup.end()) {
		// First edge between this pair: append a fresh dependency.
		depList.lookup.emplace(std::make_pair(key, depList.dependencies.size()));
		depList.dependencies.push_back({
			.srcSubpass = static_cast<uint32_t>(srcSubpass),
			.dstSubpass = static_cast<uint32_t>(dstSubpass),
			.srcStageMask = srcStageMask,
			.dstStageMask = dstStageMask,
			.srcAccessMask = srcAccessMask,
			.dstAccessMask = dstAccessMask,
			.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT
		});
		return;
	}
	// Seen before: widen the existing dependency's masks.
	auto& dep = depList.dependencies[existing->second];
	dep.srcStageMask |= srcStageMask;
	dep.dstStageMask |= dstStageMask;
	dep.srcAccessMask |= srcAccessMask;
	dep.dstAccessMask |= dstAccessMask;
}
// For one attachment used by dstSubpass, emits a dependency from every
// earlier subpass that touched the same attachment, with source stage/access
// masks derived from how that earlier subpass used it. An attachIndex equal
// to colorAttachmentCount denotes the depth-stencil attachment.
static void build_preceding_dependencies(const RenderPass::CreateInfo& createInfo,
		DependencyList& depList, uint8_t dstSubpass, RenderPass::AttachmentIndex_T attachIndex,
		VkPipelineStageFlags dstStageMask, VkAccessFlags dstAccessMask) {
	for (uint8_t srcSubpass = 0; srcSubpass < dstSubpass; ++srcSubpass) {
		auto& sp = createInfo.pSubpasses[srcSubpass];
		if (attachIndex == createInfo.colorAttachmentCount) {
			// Depth-stencil: source masks depend on the earlier subpass's
			// declared read/write usage.
			switch (sp.depthStencilUsage) {
			case RenderPass::DepthStencilUsage::READ:
				build_dependency(depList, srcSubpass, dstSubpass,
						VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
						| VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
						VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, dstStageMask,
						dstAccessMask);
				break;
			case RenderPass::DepthStencilUsage::WRITE:
				build_dependency(depList, srcSubpass, dstSubpass,
						VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
						| VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
						VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, dstStageMask,
						dstAccessMask);
				break;
			case RenderPass::DepthStencilUsage::READ_WRITE:
				build_dependency(depList, srcSubpass, dstSubpass,
						VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
						| VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
						VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
						| VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, dstStageMask,
						dstAccessMask);
				break;
			default:
				// Earlier subpass did not touch depth: no dependency needed.
				break;
			}
		}
		else if (subpass_has_color_attachment(sp, attachIndex)
				|| subpass_has_resolve_attachment(sp, attachIndex)) {
			// Earlier subpass wrote the attachment as a color/resolve target.
			build_dependency(depList, srcSubpass, dstSubpass,
					VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
					VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
					dstStageMask, dstAccessMask);
		}
		else if (subpass_has_input_attachment(sp, attachIndex)) {
			// Earlier subpass only read it from the fragment shader.
			build_dependency(depList, srcSubpass, dstSubpass,
					VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT, dstStageMask,
					dstAccessMask);
		}
	}
}
// Emits dependencies for every attachment each subpass (after the first)
// consumes, against all preceding subpasses that used the same attachment.
static void build_subpass_dependencies(const RenderPass::CreateInfo& createInfo,
		DependencyList& depList) {
	// Subpass 0 has nothing before it to depend on.
	for (uint8_t subpassIndex = 1; subpassIndex < createInfo.subpassCount; ++subpassIndex) {
		auto& sp = createInfo.pSubpasses[subpassIndex];
		for (RenderPass::AttachmentCount_T i = 0; i < sp.colorAttachmentCount; ++i) {
			build_preceding_dependencies(createInfo, depList, subpassIndex, sp.colorAttachments[i],
					VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
					VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
		}
		for (RenderPass::AttachmentCount_T i = 0; i < sp.inputAttachmentCount; ++i) {
			build_preceding_dependencies(createInfo, depList, subpassIndex, sp.inputAttachments[i],
					VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
		}
		for (RenderPass::AttachmentCount_T i = 0; i < sp.resolveAttachmentCount; ++i) {
			build_preceding_dependencies(createInfo, depList, subpassIndex,
					sp.resolveAttachments[i], VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
					VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
		}
		// Depth-stencil (denoted by attachIndex == colorAttachmentCount);
		// destination masks reflect this subpass's declared usage.
		switch (sp.depthStencilUsage) {
		case RenderPass::DepthStencilUsage::READ:
			build_preceding_dependencies(createInfo, depList, subpassIndex,
					createInfo.colorAttachmentCount,
					VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
					| VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
					VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT);
			break;
		case RenderPass::DepthStencilUsage::WRITE:
			build_preceding_dependencies(createInfo, depList, subpassIndex,
					createInfo.colorAttachmentCount,
					VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
					| VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
					VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
			break;
		case RenderPass::DepthStencilUsage::READ_WRITE:
			build_preceding_dependencies(createInfo, depList, subpassIndex,
					createInfo.colorAttachmentCount,
					VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
					| VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
					VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
					| VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
			break;
		default:
			break;
		}
	}
}
// Assembles the attachment, subpass, and dependency arrays and creates the
// VkRenderPass. Returns VK_NULL_HANDLE on failure.
static VkRenderPass render_pass_create(const RenderPass::CreateInfo& createInfo) {
	VkRenderPass renderPass = VK_NULL_HANDLE;
	auto hasDepthAttach = has_depth_attachment(createInfo);
	// +1 reserves the trailing depth-stencil slot.
	VkAttachmentDescription attachDescs[RenderPass::MAX_COLOR_ATTACHMENTS + 1] = {};
	// Value-initialized so untouched VkSubpassDescription fields are zero.
	std::vector<VkSubpassDescription> subpasses(createInfo.subpassCount);
	std::vector<std::vector<VkAttachmentReference>> attachRefLists;
	DependencyList dependencyList;
	build_attachment_descriptions(createInfo, attachDescs, hasDepthAttach);
	build_subpass_descriptions(createInfo, subpasses.data(), attachRefLists);
	build_subpass_dependencies(createInfo, dependencyList);
	VkRenderPassCreateInfo rpCreateInfo{
		.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
		// bool promotes to 0/1, accounting for the optional depth attachment.
		.attachmentCount = static_cast<uint32_t>(createInfo.colorAttachmentCount) + hasDepthAttach,
		.pAttachments = attachDescs,
		.subpassCount = static_cast<uint32_t>(createInfo.subpassCount),
		.pSubpasses = subpasses.data(),
		.dependencyCount = static_cast<uint32_t>(dependencyList.dependencies.size()),
		.pDependencies = dependencyList.dependencies.empty() ? nullptr
				: dependencyList.dependencies.data()
	};
	if (vkCreateRenderPass(g_renderContext->get_device(), &rpCreateInfo, nullptr, &renderPass)
			== VK_SUCCESS) {
		return renderPass;
	}
	return VK_NULL_HANDLE;
}
// A pass uses a depth-stencil attachment iff any depth clear/load/store
// create flag is set.
static bool has_depth_attachment(const RenderPass::CreateInfo& info) {
	constexpr auto depthFlags = RenderPass::CREATE_FLAG_CLEAR_DEPTH_STENCIL
			| RenderPass::CREATE_FLAG_LOAD_DEPTH_STENCIL
			| RenderPass::CREATE_FLAG_STORE_DEPTH_STENCIL;
	return (info.createFlags & depthFlags) != 0;
}
// Attachments are interchangeable when format and sample count both match.
bool RenderPass::AttachmentInfo::operator==(const AttachmentInfo& other) const {
	if (format != other.format) {
		return false;
	}
	return samples == other.samples;
}
bool RenderPass::AttachmentInfo::operator!=(const AttachmentInfo& other) const {
	return !(*this == other);
}
// Element-wise equality; only the first `count` entries of each attachment
// index array are significant.
bool RenderPass::SubpassInfo::operator==(const SubpassInfo& other) const {
	const auto sameIndices = [](const AttachmentIndex_T* a, const AttachmentIndex_T* b,
			AttachmentCount_T count) {
		for (AttachmentCount_T i = 0; i < count; ++i) {
			if (a[i] != b[i]) {
				return false;
			}
		}
		return true;
	};
	return colorAttachmentCount == other.colorAttachmentCount
			&& inputAttachmentCount == other.inputAttachmentCount
			&& resolveAttachmentCount == other.resolveAttachmentCount
			&& depthStencilUsage == other.depthStencilUsage
			&& sameIndices(colorAttachments, other.colorAttachments, colorAttachmentCount)
			&& sameIndices(inputAttachments, other.inputAttachments, inputAttachmentCount)
			&& sameIndices(resolveAttachments, other.resolveAttachments, resolveAttachmentCount);
}
bool RenderPass::SubpassInfo::operator!=(const SubpassInfo& other) const {
	return !(*this == other);
}
// Deep equality used as the render pass cache key comparison.
// FIX: swapchainAttachmentMask was hashed (see RenderPassCreateInfoHash) but
// never compared here, so two CreateInfos differing only in swapchain usage
// — which changes the attachments' initial/final layouts — compared equal
// and could alias to the same cached VkRenderPass on a hash-bucket collision.
bool RenderPass::CreateInfo::operator==(const CreateInfo& other) const {
	if (colorAttachmentCount != other.colorAttachmentCount
			|| clearAttachmentMask != other.clearAttachmentMask
			|| loadAttachmentMask != other.loadAttachmentMask
			|| storeAttachmentMask != other.storeAttachmentMask
			|| swapchainAttachmentMask != other.swapchainAttachmentMask
			|| subpassCount != other.subpassCount || createFlags != other.createFlags) {
		return false;
	}
	// The depth attachment only participates when the pass actually uses one.
	if (has_depth_attachment(*this) && depthAttachment != other.depthAttachment) {
		return false;
	}
	for (RenderPass::AttachmentCount_T i = 0; i < colorAttachmentCount; ++i) {
		if (colorAttachments[i] != other.colorAttachments[i]) {
			return false;
		}
	}
	for (uint8_t i = 0; i < subpassCount; ++i) {
		if (pSubpasses[i] != other.pSubpasses[i]) {
			return false;
		}
	}
	return true;
}
// Hash for the render pass cache. Packs the small uint8 counts/masks into
// uint32 words before feeding them to HashBuilder. Per-subpass attachment
// indices are deliberately omitted — a coarser hash is legal as long as
// operator== is exact.
size_t RenderPassCreateInfoHash::operator()(const RenderPass::CreateInfo& info) const {
	HashBuilder hb{};
	for (RenderPass::AttachmentCount_T i = 0; i < info.colorAttachmentCount; ++i) {
		auto& attach = info.colorAttachments[i];
		hb.add_uint32(static_cast<uint32_t>(attach.format));
		hb.add_uint32(static_cast<uint32_t>(attach.samples));
	}
	// Only hash the depth attachment when the pass actually uses one,
	// mirroring operator==.
	if (has_depth_attachment(info)) {
		hb.add_uint32(static_cast<uint32_t>(info.depthAttachment.format));
		hb.add_uint32(static_cast<uint32_t>(info.depthAttachment.samples));
	}
	for (uint8_t i = 0; i < info.subpassCount; ++i) {
		auto& subpass = info.pSubpasses[i];
		hb.add_uint32(static_cast<uint32_t>(subpass.colorAttachmentCount)
				| (static_cast<uint32_t>(subpass.inputAttachmentCount) << 8)
				| (static_cast<uint32_t>(subpass.resolveAttachmentCount) << 16)
				| (static_cast<uint32_t>(subpass.depthStencilUsage) << 24));
	}
	hb.add_uint32(static_cast<uint32_t>(info.colorAttachmentCount)
			| (static_cast<uint32_t>(info.clearAttachmentMask) << 8)
			| (static_cast<uint32_t>(info.loadAttachmentMask) << 16)
			| (static_cast<uint32_t>(info.storeAttachmentMask) << 24));
	hb.add_uint32(static_cast<uint32_t>(info.swapchainAttachmentMask)
			| (static_cast<uint32_t>(info.subpassCount) << 8)
			| (static_cast<uint32_t>(info.createFlags) << 16));
	return hb.get();
}
|
whupdup/frame
|
real/graphics/render_pass.cpp
|
C++
|
gpl-3.0
| 19,139
|
#pragma once
#include <core/intrusive_ptr.hpp>
#include <graphics/unique_graphics_object.hpp>
#include <volk.h>
namespace ZN::GFX {
// Ref-counted wrapper over VkRenderPass with global de-duplication: create()
// returns a cached instance for an identical CreateInfo (see render_pass.cpp).
class RenderPass final : public Memory::ThreadSafeIntrusivePtrEnabled<RenderPass>,
		public UniqueGraphicsObject {
public:
	using AttachmentCount_T = uint8_t;
	using AttachmentBitMask_T = uint8_t;
	using AttachmentIndex_T = uint8_t;
	using CreateFlags = uint8_t;
	static constexpr const uint8_t MAX_COLOR_ATTACHMENTS = 8;
	// NOTE(review): ~0 is int -1 and converts to uint8_t 255 here; a
	// static_cast<AttachmentIndex_T>(~0) would state the intent explicitly.
	static constexpr const AttachmentIndex_T INVALID_ATTACHMENT_INDEX = ~0;
	// How a subpass uses the pass's depth-stencil attachment.
	enum class DepthStencilUsage {
		NONE,
		READ,
		WRITE,
		READ_WRITE
	};
	// Depth-stencil load/store policy; any set bit implies the pass has a
	// depth attachment.
	enum CreateFlagBits : CreateFlags {
		CREATE_FLAG_CLEAR_DEPTH_STENCIL = 0b001,
		CREATE_FLAG_LOAD_DEPTH_STENCIL = 0b010,
		CREATE_FLAG_STORE_DEPTH_STENCIL = 0b100
	};
	struct AttachmentInfo {
		VkFormat format;
		VkSampleCountFlagBits samples;
		bool operator==(const AttachmentInfo&) const;
		bool operator!=(const AttachmentInfo&) const;
	};
	// Attachment index lists per subpass; only the first *Count entries of
	// each array are meaningful.
	struct SubpassInfo {
		AttachmentIndex_T colorAttachments[MAX_COLOR_ATTACHMENTS];
		AttachmentIndex_T inputAttachments[MAX_COLOR_ATTACHMENTS];
		AttachmentIndex_T resolveAttachments[MAX_COLOR_ATTACHMENTS];
		AttachmentCount_T colorAttachmentCount;
		AttachmentCount_T inputAttachmentCount;
		AttachmentCount_T resolveAttachmentCount;
		DepthStencilUsage depthStencilUsage;
		bool operator==(const SubpassInfo&) const;
		bool operator!=(const SubpassInfo&) const;
	};
	// Cache key. The per-attachment bit masks (clear/load/store/swapchain)
	// are indexed by color attachment position.
	struct CreateInfo {
		AttachmentInfo colorAttachments[MAX_COLOR_ATTACHMENTS];
		AttachmentInfo depthAttachment;
		SubpassInfo* pSubpasses;
		AttachmentCount_T colorAttachmentCount;
		AttachmentBitMask_T clearAttachmentMask;
		AttachmentBitMask_T loadAttachmentMask;
		AttachmentBitMask_T storeAttachmentMask;
		AttachmentBitMask_T swapchainAttachmentMask;
		uint8_t subpassCount;
		CreateFlags createFlags;
		bool operator==(const CreateInfo&) const;
	};
	// Returns a cached or newly created pass; empty pointer on failure.
	static IntrusivePtr<RenderPass> create(const CreateInfo& createInfo);
	~RenderPass();
	NULL_COPY_AND_ASSIGN(RenderPass);
	operator VkRenderPass() const;
	VkRenderPass get_render_pass() const;
private:
	VkRenderPass m_renderPass;
	explicit RenderPass(VkRenderPass);
};
}
|
whupdup/frame
|
real/graphics/render_pass.hpp
|
C++
|
gpl-3.0
| 2,220
|
#pragma once
#include <cstdint>
namespace ZN::GFX {
// Returns the largest power of two strictly less than `value`, clamped to 1
// for value <= 2 (matching the original contract: f(0)=f(1)=f(2)=1).
//
// FIX: the previous implementation doubled `r` while `2 * r < value`; for
// value > 2^31 the multiplication wrapped around to 0 and the loop never
// terminated. Comparing against (value - 1) >> 1 keeps every intermediate
// in range.
constexpr uint32_t previous_power_of_2(uint32_t value) {
	if (value <= 2) {
		return 1;
	}
	const uint32_t limit = (value - 1) >> 1; // r stops once it exceeds this
	uint32_t r = 1;
	while (r <= limit) {
		r <<= 1;
	}
	return r;
}
// Number of levels in a full mip chain for a width x height image: each
// level halves both dimensions until 1x1 (driven by the larger dimension).
constexpr uint32_t get_image_mip_levels(uint32_t width, uint32_t height) {
	uint32_t levels = 1;
	// width | height shares its highest set bit with max(width, height), so
	// shifting it down counts the same number of halvings.
	for (uint32_t extent = width | height; extent > 1; extent >>= 1) {
		++levels;
	}
	return levels;
}
// Number of workgroups needed to cover `threadCount` threads with groups of
// `localSize` threads (ceiling division; localSize must be non-zero).
//
// Computed as quotient-plus-remainder rather than the usual
// (threadCount + localSize - 1) / localSize so the addition cannot wrap for
// thread counts near UINT32_MAX.
constexpr uint32_t get_group_count(uint32_t threadCount, uint32_t localSize) {
	return threadCount / localSize + (threadCount % localSize != 0 ? 1u : 0u);
}
// Total pixel count across up to `mipLevels` levels of a width x height mip
// chain (each level halves dimensions, clamped at 1). The final 1x1 level is
// counted whenever the chain collapsed to 1x1.
constexpr size_t get_image_total_size_pixels(uint32_t width, uint32_t height, uint32_t mipLevels) {
	size_t result = 0;
	while (mipLevels > 0 && (width > 1 || height > 1)) {
		// FIX: widen before multiplying — width * height in uint32_t wraps
		// for images at or beyond 65536x65536.
		result += static_cast<size_t>(width) * height;
		if (width > 1) {
			width /= 2;
		}
		if (height > 1) {
			height /= 2;
		}
		--mipLevels;
	}
	// Adds 1 only when both dimensions reached 1 (loop exited on dimensions,
	// or the chain started at 1x1).
	result += (width == 1) && (height == 1);
	return result;
}
}
|
whupdup/frame
|
real/graphics/render_utils.hpp
|
C++
|
gpl-3.0
| 884
|
#include "sampler.hpp"
#include <graphics/render_context.hpp>
using namespace ZN;
using namespace ZN::GFX;
// Creates a Sampler from the given Vulkan create info.
// Returns a null IntrusivePtr when vkCreateSampler fails.
Memory::IntrusivePtr<Sampler> Sampler::create(const VkSamplerCreateInfo& createInfo) {
	VkSampler handle = VK_NULL_HANDLE;
	auto result = vkCreateSampler(g_renderContext->get_device(), &createInfo, nullptr, &handle);
	if (result != VK_SUCCESS) {
		return {};
	}
	return Memory::IntrusivePtr<Sampler>(new Sampler(handle));
}
Sampler::Sampler(VkSampler sampler)
: m_sampler(sampler) {}
// Queues the owned VkSampler for deferred destruction via the render
// context's deletion queue; a null handle is skipped entirely.
Sampler::~Sampler() {
	if (m_sampler == VK_NULL_HANDLE) {
		return;
	}
	VkSampler handle = m_sampler;
	g_renderContext->queue_delete([handle] {
		vkDestroySampler(g_renderContext->get_device(), handle, nullptr);
	});
}
// Implicit conversion to the underlying Vulkan handle.
Sampler::operator VkSampler() const {
	return m_sampler;
}
// Explicit accessor for the underlying Vulkan handle.
VkSampler Sampler::get_sampler() const {
	return m_sampler;
}
|
whupdup/frame
|
real/graphics/sampler.cpp
|
C++
|
gpl-3.0
| 788
|
#pragma once
#include <core/intrusive_ptr.hpp>
#include <volk.h>
namespace ZN::GFX {
// Reference-counted RAII wrapper around a VkSampler. Instances are
// created via Sampler::create and held through Memory::IntrusivePtr;
// copying is disabled.
class Sampler final : public Memory::IntrusivePtrEnabled<Sampler> {
	public:
		// Creates a sampler from the given Vulkan create info.
		// Returns a null pointer when vkCreateSampler fails.
		static Memory::IntrusivePtr<Sampler> create(const VkSamplerCreateInfo&);
		// Queues the underlying VkSampler for deferred destruction.
		~Sampler();
		NULL_COPY_AND_ASSIGN(Sampler);
		// Implicit conversion to the raw Vulkan handle.
		operator VkSampler() const;
		// Explicit accessor for the raw Vulkan handle.
		VkSampler get_sampler() const;
	private:
		// The owned Vulkan handle; set once in the constructor.
		VkSampler m_sampler;
		explicit Sampler(VkSampler);
};
}
|
whupdup/frame
|
real/graphics/sampler.hpp
|
C++
|
gpl-3.0
| 427
|
#pragma once
#include <cstdint>
namespace ZN::GFX {
// Identifies a sampler by filtering mode; presumably used as an index
// into a table of pre-created samplers — confirm against callers.
enum class SamplerIndex : uint32_t {
	LINEAR = 0, // linear filtering
	NEAREST = 1 // nearest-neighbor filtering
};
}
|
whupdup/frame
|
real/graphics/sampler_index.hpp
|
C++
|
gpl-3.0
| 125
|
#include "semaphore_pool.hpp"
#include <graphics/render_context.hpp>
using namespace ZN;
using namespace ZN::GFX;
// Destroys every VkSemaphore this pool ever created, whether or not
// it currently sits on the free list (m_semaphores tracks all of them).
SemaphorePool::~SemaphorePool() {
	for (VkSemaphore semaphore : m_semaphores) {
		vkDestroySemaphore(g_renderContext->get_device(), semaphore, nullptr);
	}
}
// Hands out a semaphore, recycling a previously released one when
// possible and creating a fresh VkSemaphore otherwise.
// Returns VK_NULL_HANDLE if vkCreateSemaphore fails.
VkSemaphore SemaphorePool::acquire_semaphore() {
	if (m_freeSemaphores.empty()) {
		// Nothing to recycle; create a brand new semaphore.
		const VkSemaphoreCreateInfo createInfo{
			.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
			.pNext = nullptr,
			.flags = 0
		};
		VkSemaphore semaphore;
		if (vkCreateSemaphore(g_renderContext->get_device(), &createInfo, nullptr, &semaphore)
				!= VK_SUCCESS) {
			return VK_NULL_HANDLE;
		}
		// Record every created semaphore so the destructor can free it.
		m_semaphores.push_back(semaphore);
		return semaphore;
	}
	VkSemaphore semaphore = m_freeSemaphores.back();
	m_freeSemaphores.pop_back();
	return semaphore;
}
// Returns a semaphore to the free list for later reuse.
// Null handles are silently ignored.
void SemaphorePool::release_semaphore(VkSemaphore semaphore) {
	if (semaphore != VK_NULL_HANDLE) {
		m_freeSemaphores.push_back(semaphore);
	}
}
|
whupdup/frame
|
real/graphics/semaphore_pool.cpp
|
C++
|
gpl-3.0
| 887
|